diff --git a/.cargo/config.toml b/.cargo/config.toml index feaf5fec86d..41566339d9c 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -12,4 +12,3 @@ clippy-stacks = "clippy -p libstackerdb -p stacks-signer -p pox-locking -p clari #[target.x86_64-unknown-linux-gnu] #linker = "/usr/bin/clang" #rustflags = ["-Clink-arg=-fuse-ld=lld", "-Clink-arg=-Wl,--no-rosegment"] - diff --git a/.dockerignore b/.dockerignore index aa66cbcb378..ac0df83528a 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,14 +1,3 @@ Dockerfile* target -integration_tests/blockstack-consensus-data/ -integration_tests/test-out/ -api/data -.git -.venv .dockerignore -testnet/index.html -testnet.log -testnet-logs* -legacy -build-scripts -dist \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 81cf2d82850..e4582d8ca72 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,6 @@ legacy/* linguist-vendored -* text=lf +# Enforcing 'lf' eol mainly for: +# - 'stx-genesis' package, where txt files need hash computation and comparison +# - 'clarity' package, where the Clarity language is sensitive to line endings for .clar files +# Anyway, setting eol for all text files keeps line-ending management homogeneous across the whole code base +* text eol=lf diff --git a/.github/actions/dockerfiles/Dockerfile.debian-source b/.github/actions/dockerfiles/Dockerfile.debian-source deleted file mode 100644 index 80c434e8d5f..00000000000 --- a/.github/actions/dockerfiles/Dockerfile.debian-source +++ /dev/null @@ -1,28 +0,0 @@ -FROM rust:bookworm as build - -ARG STACKS_NODE_VERSION="No Version Info" -ARG GIT_BRANCH='No Branch Info' -ARG GIT_COMMIT='No Commit Info' -ARG BUILD_DIR=/build -ARG TARGET=x86_64-unknown-linux-gnu -# Allow us to override the default `--target-cpu` for the given target triplet -ARG TARGET_CPU -ENV RUSTFLAGS="${TARGET_CPU:+${RUSTFLAGS} -Ctarget-cpu=${TARGET_CPU}}" -WORKDIR /src - -COPY . . - -RUN apt-get update && apt-get install -y git libclang-dev - -# Run all the build steps in ramdisk in an attempt to speed things up -RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && cd ${BUILD_DIR} \ && rustup target add ${TARGET} \ && rustup component add rustfmt \ && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/.
/out - -FROM --platform=${TARGETPLATFORM} debian:bookworm -COPY --from=build /out/stacks-node /out/stacks-signer /out/stacks-inspect /bin/ -CMD ["stacks-node", "mainnet"] diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 6bd63f11a77..419a1b6e1bf 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -70,30 +70,13 @@ jobs: - test-name: tests::neon_integrations::lockup_integration - test-name: tests::neon_integrations::most_recent_utxo_integration_test - test-name: tests::neon_integrations::run_with_custom_wallet - - test-name: tests::neon_integrations::test_competing_miners_build_anchor_blocks_and_microblocks_on_same_chain - test-name: tests::neon_integrations::test_competing_miners_build_anchor_blocks_on_same_chain_without_rbf - test-name: tests::neon_integrations::test_one_miner_build_anchor_blocks_on_same_chain_without_rbf - test-name: tests::signer::v0::tenure_extend_after_2_bad_commits - test-name: tests::stackerdb::test_stackerdb_event_observer - test-name: tests::stackerdb::test_stackerdb_load_store - # Microblock tests that are no longer needed on every CI run - # (microblocks are unsupported starting in Epoch 2.5) - - test-name: tests::neon_integrations::bad_microblock_pubkey - - test-name: tests::neon_integrations::microblock_fork_poison_integration_test - - test-name: tests::neon_integrations::microblock_integration_test - - test-name: tests::neon_integrations::microblock_large_tx_integration_test_FLAKY - - test-name: tests::neon_integrations::microblock_limit_hit_integration_test - - test-name: tests::neon_integrations::microblock_miner_multiple_attempts - - test-name: tests::neon_integrations::test_problematic_microblocks_are_not_mined - - test-name: tests::neon_integrations::test_problematic_microblocks_are_not_relayed_or_stored - - test-name: tests::neon_integrations::runtime_overflow_unconfirmed_microblocks_integration_test - - test-name: tests::neon_integrations::size_overflow_unconfirmed_invalid_stream_microblocks_integration_test - - test-name: tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test - - test-name: tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test - - test-name: tests::epoch_25::microblocks_disabled # Epoch tests are covered by the epoch-tests CI workflow, and don't need to run # on every PR (for older epochs) - - test-name: tests::epoch_205::bigger_microblock_streams_in_2_05 - test-name: tests::epoch_205::test_cost_limit_switch_version205 - test-name: tests::epoch_205::test_dynamic_db_method_costs - test-name: tests::epoch_205::test_exact_block_costs @@ -123,6 +106,13 @@ jobs: - test-name: tests::epoch_24::verify_auto_unlock_behavior # Disable this flaky test. 
We don't need to continue testing the Epoch 2 -> 3 transition - test-name: tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY + # These mempool tests take a long time to run, and are meant to be run manually + - test-name: tests::nakamoto_integrations::large_mempool_original_constant_fee + - test-name: tests::nakamoto_integrations::large_mempool_original_random_fee + - test-name: tests::nakamoto_integrations::large_mempool_next_constant_fee + - test-name: tests::nakamoto_integrations::large_mempool_next_random_fee + - test-name: tests::nakamoto_integrations::larger_mempool + - test-name: tests::signer::v0::larger_mempool steps: ## Setup test environment diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e32148c06fc..d97393761ac 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -102,147 +102,3 @@ jobs: signer_docker_tag: ${{ needs.check-release.outputs.signer_docker_tag }} is_node_release: ${{ needs.check-release.outputs.is_node_release }} is_signer_release: ${{ needs.check-release.outputs.is_signer_release }} - secrets: inherit - - ## Build and push Debian image built from source - ## - ## Runs when: - ## - it is not a node or signer-only release run - docker-image: - if: | - needs.check-release.outputs.is_node_release != 'true' || - needs.check-release.outputs.is_signer_release != 'true' - name: Docker Image (Source) - uses: ./.github/workflows/image-build-source.yml - needs: - - rustfmt - - check-release - secrets: inherit - - ## Create a reusable cache for tests - ## - ## Runs when: - ## - it is a node release run - ## or any of: - ## - this workflow is called manually - ## - PR is opened - ## - PR added to merge queue - create-cache: - if: | - needs.check-release.outputs.is_node_release == 'true' || - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' - name: Create Test Cache - needs: - - rustfmt - - check-release - uses: ./.github/workflows/create-cache.yml - - ## Tests to run regularly - ## - ## Runs when: - ## - it is a node or signer-only release run - ## or any of: - ## - this workflow is called manually - ## - PR is opened - ## - PR added to merge queue - stacks-core-tests: - if: | - needs.check-release.outputs.is_node_release == 'true' || - needs.check-release.outputs.is_signer_release == 'true' || - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' - name: Stacks Core Tests - needs: - - rustfmt - - create-cache - - check-release - uses: ./.github/workflows/stacks-core-tests.yml - - ## Checks to run on built binaries - ## - ## Runs when: - ## - it is a node or signer-only release run - ## or any of: - ## - this workflow is called manually - ## - PR is opened - ## - PR added to merge queue - stacks-core-build-tests: - if: | - needs.check-release.outputs.is_node_release == 'true' || - needs.check-release.outputs.is_signer_release == 'true' || - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' - name: Stacks Core Build Tests - needs: - - rustfmt - - check-release - uses: ./.github/workflows/core-build-tests.yml - - ## Checks to run on built binaries - ## - ## Runs when: - ## - it is a node release run - ## or any of: - ## - this workflow is called manually - ## - PR is opened - ## - PR added to merge queue - bitcoin-tests: - if: | - needs.check-release.outputs.is_node_release == 'true' || - github.event_name ==
'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' - name: Bitcoin Tests - needs: - - rustfmt - - create-cache - - check-release - uses: ./.github/workflows/bitcoin-tests.yml - - p2p-tests: - if: | - needs.check-release.outputs.is_node_release == 'true' || - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' - name: P2P Tests - needs: - - rustfmt - - create-cache - - check-release - uses: ./.github/workflows/p2p-tests.yml - - ## Test to run on a tagged release - ## - ## Runs when: - ## - it is a node release run - atlas-tests: - if: needs.check-release.outputs.is_node_release == 'true' - name: Atlas Tests - needs: - - rustfmt - - create-cache - - check-release - uses: ./.github/workflows/atlas-tests.yml - - epoch-tests: - if: needs.check-release.outputs.is_node_release == 'true' - name: Epoch Tests - needs: - - rustfmt - - create-cache - - check-release - uses: ./.github/workflows/epoch-tests.yml - - slow-tests: - if: needs.check-release.outputs.is_node_release == 'true' - name: Slow Tests - needs: - - rustfmt - - create-cache - - check-release - uses: ./.github/workflows/slow-tests.yml diff --git a/.github/workflows/clarity-js-sdk-pr.yml b/.github/workflows/clarity-js-sdk-pr.yml index 6bcd555ca9f..e369f8a583b 100644 --- a/.github/workflows/clarity-js-sdk-pr.yml +++ b/.github/workflows/clarity-js-sdk-pr.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout latest clarity js sdk id: git_checkout - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: token: ${{ secrets.GH_TOKEN }} repository: ${{ env.CLARITY_JS_SDK_REPOSITORY }} @@ -46,7 +46,7 @@ jobs: - name: Create Pull Request id: create_pr - uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # v6.0.5 + uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7.0.6 with: token: ${{ secrets.GH_TOKEN }} commit-message: "chore: update clarity-native-bin tag" diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index 2279d42c882..048e9fdc036 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -22,13 +22,13 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@v3 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Define Rust Toolchain id: define_rust_toolchain run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV - name: Setup Rust Toolchain id: setup_rust_toolchain - uses: actions-rust-lang/setup-rust-toolchain@v1 + uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 with: toolchain: ${{ env.RUST_TOOLCHAIN }} components: clippy diff --git a/.github/workflows/core-build-tests.yml b/.github/workflows/core-build-tests.yml index 393e2ff6b03..614f3f69c3d 100644 --- a/.github/workflows/core-build-tests.yml +++ b/.github/workflows/core-build-tests.yml @@ -12,13 +12,13 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@v3 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Define Rust Toolchain id: define_rust_toolchain run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV - name: Setup Rust Toolchain id: setup_rust_toolchain - uses: actions-rust-lang/setup-rust-toolchain@v1 + uses: 
actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 with: toolchain: ${{ env.RUST_TOOLCHAIN }} - name: Build the binaries diff --git a/.github/workflows/epoch-tests.yml b/.github/workflows/epoch-tests.yml index be00618d505..bccedf7056e 100644 --- a/.github/workflows/epoch-tests.yml +++ b/.github/workflows/epoch-tests.yml @@ -29,7 +29,6 @@ jobs: max-parallel: 32 matrix: test-name: - - tests::epoch_205::bigger_microblock_streams_in_2_05 - tests::epoch_205::test_cost_limit_switch_version205 - tests::epoch_205::test_dynamic_db_method_costs - tests::epoch_205::test_exact_block_costs diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index 5028d35968a..b796f36bee0 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -29,9 +29,6 @@ on: description: "True if it is a signer release" required: true type: string - secrets: - GH_TOKEN: - required: true concurrency: group: github-release-${{ github.head_ref || github.ref }} @@ -51,6 +48,7 @@ jobs: inputs.signer_tag != '' name: Build Binaries runs-on: ubuntu-latest + environment: "Build Release" strategy: ## Run a maximum of 10 builds concurrently, using the matrix defined in inputs.arch max-parallel: 10 @@ -62,7 +60,6 @@ jobs: - windows cpu: - arm64 - - armv7 - x86-64 ## defaults to x86-64-v3 variant - intel haswell (2013) and newer # - x86-64-v2 ## intel nehalem (2008) and newer # - x86-64-v3 ## intel haswell (2013) and newer @@ -95,6 +92,8 @@ jobs: runs-on: ubuntu-latest needs: - build-binaries + permissions: + contents: write steps: ## Creates releases - name: Create Release @@ -106,39 +105,61 @@ jobs: signer_docker_tag: ${{ inputs.signer_docker_tag }} is_node_release: ${{ inputs.is_node_release }} is_signer_release: ${{ inputs.is_signer_release }} - GH_TOKEN: ${{ secrets.GH_TOKEN }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + ## Builds arch dependent Docker images from binaries ## + ## Note: this step requires the binaries in the create-release step to be uploaded ## Runs when the following is true: ## - either node or signer tag is provided - docker-image: + # docker-image: + # if: | + # inputs.node_tag != '' || + # inputs.signer_tag != '' + # name: Docker Image (Binary) + # runs-on: ubuntu-latest + # needs: + # - build-binaries + # - create-release + # strategy: + # fail-fast: false + # ## Build a maximum of 2 images concurrently based on matrix.dist + # max-parallel: 2 + # matrix: + # dist: + # - alpine + # - debian + # steps: + # - name: Create Docker Image + # uses: stacks-network/actions/stacks-core/release/docker-images@main + # with: + # node_tag: ${{ inputs.node_tag }} + # node_docker_tag: ${{ inputs.node_docker_tag }} + # signer_tag: ${{ inputs.signer_tag }} + # signer_docker_tag: ${{ inputs.signer_docker_tag }} + # is_node_release: ${{ inputs.is_node_release }} + # is_signer_release: ${{ inputs.is_signer_release }} + # DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + # DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + # dist: ${{ matrix.dist }} + + ## Create the downstream PR for the release branch to master,develop + create-pr: if: | inputs.node_tag != '' || inputs.signer_tag != '' - name: Docker Image (Binary) + name: Create Downstream PR (${{ github.ref_name }}) runs-on: ubuntu-latest needs: - build-binaries - create-release - strategy: - fail-fast: false - ## Build a maximum of 2 images concurrently based on matrix.dist - max-parallel: 2 - matrix: - dist: - - alpine - - debian + # - docker-image + 
permissions: + pull-requests: write steps: - - name: Create Docker Image - uses: stacks-network/actions/stacks-core/release/docker-images@main + - name: Open Downstream PR + id: create-pr + uses: stacks-network/actions/stacks-core/release/downstream-pr@main with: - node_tag: ${{ inputs.node_tag }} - node_docker_tag: ${{ inputs.node_docker_tag }} - signer_tag: ${{ inputs.signer_tag }} - signer_docker_tag: ${{ inputs.signer_docker_tag }} - is_node_release: ${{ inputs.is_node_release }} - is_signer_release: ${{ inputs.is_signer_release }} - DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} - dist: ${{ matrix.dist }} + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/image-build-source.yml b/.github/workflows/image-build-source.yml index e45455f05b6..31cd40b360e 100644 --- a/.github/workflows/image-build-source.yml +++ b/.github/workflows/image-build-source.yml @@ -21,13 +21,6 @@ jobs: image: name: Build Image runs-on: ubuntu-latest - strategy: - fail-fast: false - ## Build a maximum of 2 images concurrently based on matrix.dist - max-parallel: 2 - matrix: - dist: - - debian steps: ## Setup Docker for the builds - name: Docker setup @@ -49,7 +42,7 @@ jobs: ## Set docker metatdata - name: Docker Metadata ( ${{matrix.dist}} ) id: docker_metadata - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 + uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96 #v5.6.1 with: images: | ${{env.docker-org}}/${{ github.event.repository.name }} @@ -61,9 +54,9 @@ jobs: ## Build docker image - name: Build and Push ( ${{matrix.dist}} ) id: docker_build - uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 + uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0 with: - file: ./.github/actions/dockerfiles/Dockerfile.${{matrix.dist}}-source + file: ./Dockerfile platforms: ${{ env.docker_platforms }} tags: ${{ steps.docker_metadata.outputs.tags }} labels: ${{ steps.docker_metadata.outputs.labels }} diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 457a2aaefd5..05b9f09f627 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -78,7 +78,7 @@ jobs: ## checkout the code - name: Checkout the latest code id: git_checkout - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Run network relay tests id: nettest @@ -96,10 +96,10 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Execute core contract unit tests with clarinet-sdk id: clarinet_unit_test - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0 with: node-version: 18.x cache: "npm" @@ -125,7 +125,7 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Execute core contract unit tests in Clarinet id: clarinet_unit_test_v1 uses: docker://hirosystems/clarinet:1.7.1 diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 00000000000..64d94def266 
--- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1,2 @@ +group_imports = "StdExternalCrate" +imports_granularity = "Module" diff --git a/.vscode/launch.json b/.vscode/launch.json index 64a883de0ad..f645a6c87db 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -4,14 +4,19 @@ { "type": "lldb", "request": "launch", - "name": "executable 'blockstack-core'", + "name": "executable 'stacks-node'", "cargo": { - "args": ["build", "--bin=stacks-node"], + "args": [ + "build", + "--bin=stacks-node" + ], "filter": { "kind": "bin" } }, - "args": ["mockamoto"], + "args": [ + "mockamoto" + ], "cwd": "${workspaceFolder}" }, { @@ -19,7 +24,11 @@ "request": "launch", "name": "executable 'clarity-cli'", "cargo": { - "args": ["build", "--bin=clarity-cli", "--package=blockstack-core"], + "args": [ + "build", + "--bin=clarity-cli", + "--package=stackslib" + ], "filter": { "name": "clarity-cli", "kind": "bin" @@ -33,13 +42,19 @@ "request": "launch", "name": "executable 'blockstack-cli'", "cargo": { - "args": ["build", "--bin=blockstack-cli", "--package=blockstack-core"], + "args": [ + "build", + "--bin=blockstack-cli", + "--package=stackslib" + ], "filter": { "name": "blockstack-cli", "kind": "bin" } }, - "args": ["generate-sk"], + "args": [ + "generate-sk" + ], "cwd": "${workspaceFolder}" }, { @@ -47,23 +62,34 @@ "request": "launch", "name": "executable 'stacks-node' -- mocknet", "cargo": { - "args": ["build", "--bin=stacks-node", "--package=stacks-node"], + "args": [ + "build", + "--bin=stacks-node", + "--package=stacks-node" + ], "filter": { "name": "stacks-node", "kind": "bin" } }, - "args": ["mocknet"], + "args": [ + "mocknet" + ], "cwd": "${workspaceFolder}" }, { "type": "lldb", "request": "launch", - "name": "unit tests in library 'blockstack_lib'", + "name": "unit tests in library 'stackslib'", "cargo": { - "args": ["test", "--no-run", "--lib", "--package=blockstack-core"], + "args": [ + "test", + "--no-run", + "--lib", + "--package=stackslib" + ], "filter": { - "name": "blockstack_lib", + "name": "stackslib", "kind": "lib" } }, @@ -73,13 +99,13 @@ { "type": "lldb", "request": "launch", - "name": "unit tests in executable 'blockstack-core'", + "name": "unit tests in executable 'stacks-inspect'", "cargo": { "args": [ "test", "--no-run", "--bin=stacks-inspect", - "--package=blockstack-core" + "--package=stackslib" ], "filter": { "name": "stacks-inspect", @@ -98,7 +124,7 @@ "test", "--no-run", "--bin=clarity-cli", - "--package=blockstack-core" + "--package=stackslib" ], "filter": { "name": "clarity-cli", @@ -117,7 +143,7 @@ "test", "--no-run", "--bin=blockstack-cli", - "--package=blockstack-core" + "--package=stackslib" ], "filter": { "name": "blockstack-cli", @@ -145,63 +171,6 @@ }, "args": [], "cwd": "${workspaceFolder}" - }, - { - "type": "lldb", - "request": "launch", - "name": "benchmark 'marf_bench'", - "cargo": { - "args": [ - "test", - "--no-run", - "--bench=marf_bench", - "--package=blockstack-core" - ], - "filter": { - "name": "marf_bench", - "kind": "bench" - } - }, - "args": [], - "cwd": "${workspaceFolder}" - }, - { - "type": "lldb", - "request": "launch", - "name": "benchmark 'large_contract_bench'", - "cargo": { - "args": [ - "test", - "--no-run", - "--bench=large_contract_bench", - "--package=blockstack-core" - ], - "filter": { - "name": "large_contract_bench", - "kind": "bench" - } - }, - "args": [], - "cwd": "${workspaceFolder}" - }, - { - "type": "lldb", - "request": "launch", - "name": "benchmark 'block_limits'", - "cargo": { - "args": [ - "test", - "--no-run", - 
"--bench=block_limits", - "--package=blockstack-core" - ], - "filter": { - "name": "block_limits", - "kind": "bench" - } - }, - "args": [], - "cwd": "${workspaceFolder}" } ] -} +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index ab8db95f5d9..4435fc28484 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,7 +1,12 @@ { - "lldb.adapterType": "native", - "lldb.launch.sourceLanguages": ["rust"], + "lldb.launch.sourceLanguages": [ + "rust" + ], "rust-analyzer.runnables.extraEnv": { "BITCOIND_TEST": "1" - } + }, + "rust-analyzer.rustfmt.extraArgs": [ + "+nightly" + ], + "files.eol": "\n" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index d649981962e..79df6e82ae3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,13 +5,28 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Added + +- Add fee information to transaction log ending with "success" or "skipped", while building a new block +- Add `max_execution_time_secs` to miner config for limiting duration of contract calls +- When a miner's config file is updated (ie with a new fee rate), a new block commit is issued using + the new values ([#5924](https://github.com/stacks-network/stacks-core/pull/5924)) + +### Changed + +- When a miner times out waiting for signatures, it will re-propose the same block instead of building a new block ([#5877](https://github.com/stacks-network/stacks-core/pull/5877)) +- Improve tenure downloader trace verbosity applying proper logging level depending on the tenure state ("debug" if unconfirmed, "info" otherwise) ([#5871](https://github.com/stacks-network/stacks-core/issues/5871)) +- Remove warning log about missing UTXOs when a node is configured as `miner` with `mock_mining` mode enabled ([#5841](https://github.com/stacks-network/stacks-core/issues/5841)) + ## [3.1.0.0.7] -## Added +### Added - Add `disable_retries` mode for events_observer disabling automatic retry on error -## Changed +### Changed - Implement faster cost tracker for default cost functions in Clarity - By default, miners will wait for a new tenure to start for a configurable amount of time after receiving a burn block before @@ -21,7 +36,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [3.1.0.0.6] -## Added +### Added - The `BlockProposal` StackerDB message serialization struct now includes a `server_version` string, which represents the version of the node that the miner is using. 
([#5803](https://github.com/stacks-network/stacks-core/pull/5803)) - Add `vrf_seed` to the `/v3/sortitions` rpc endpoint @@ -33,6 +48,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Logging improvements: - P2P logs now includes a reason for dropping a peer or neighbor - Improvements to how a PeerAddress is logged (human readable format vs hex) +- Pending event dispatcher requests will no longer be sent to URLs that are no longer registered as event observers ([#5834](https://github.com/stacks-network/stacks-core/pull/5834)) ### Fixed diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7c79fc286c8..577d417c2c5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -48,7 +48,7 @@ For an example of this process, see PRs ### Documentation Updates -- Any major changes should be added to the [CHANGELOG](CHANGELOG.md). +- Any major changes should be added to the [CHANGELOG](CHANGELOG.md)[*]. - Mention any required documentation changes in the description of your pull request. - If adding or updating an RPC endpoint, ensure the change is documented in the OpenAPI spec: [`./docs/rpc/openapi.yaml`](./docs/rpc/openapi.yaml). @@ -56,6 +56,9 @@ For an example of this process, see PRs test, module, function, etc.), each should be documented according to our [coding guidelines](#Coding-Guidelines). +> [*] The Changelog focuses on product changes. A "major change" refers to updates that have a direct impact on the end user, such as introducing new features, modifying existing functionality, or optimizing runtime performance. +> On the other hand, changes that do not need to be reflected in the Changelog include code refactoring, writing tests, or automating processes, as these do not directly affect the user experience. + ## Git Commit Messages Aim to use descriptive git commit messages. We try to follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/).
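The `max_execution_time_secs` entry in the Unreleased changelog above is the config-side half of the execution-time limit this diff introduces. As a rough sketch of the plumbing (the `MinerConfig` struct here is hypothetical; only the `max_execution_time_secs` key comes from the changelog), the configured seconds would be turned into the `Option<Duration>` that the new `max_execution_time` parameters on `run_contract_call` and `initialize_smart_contract` accept later in this diff:

```rust
use std::time::Duration;

// Hypothetical miner-config shape; only the `max_execution_time_secs` key is
// taken from the changelog entry, everything else is illustrative.
struct MinerConfig {
    max_execution_time_secs: Option<u64>,
}

impl MinerConfig {
    // Convert the configured budget, if any, into the Option<Duration>
    // expected by the new `max_execution_time` parameters.
    fn max_execution_time(&self) -> Option<Duration> {
        self.max_execution_time_secs.map(Duration::from_secs)
    }
}

fn main() {
    let cfg = MinerConfig { max_execution_time_secs: Some(30) };
    assert_eq!(cfg.max_execution_time(), Some(Duration::from_secs(30)));

    // Leaving the key unset disables the limit entirely.
    let unlimited = MinerConfig { max_execution_time_secs: None };
    assert_eq!(unlimited.max_execution_time(), None);
}
```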
diff --git a/Cargo.lock b/Cargo.lock index a51010ecdf3..989c0d34f74 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,7 +29,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -173,6 +173,12 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + [[package]] name = "ascii" version = "1.1.0" @@ -461,6 +467,21 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + [[package]] name = "bitflags" version = "1.3.2" @@ -479,7 +500,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -488,7 +509,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -572,7 +593,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -685,7 +706,7 @@ dependencies = [ "aes-gcm", "base64 0.13.1", "hkdf", - "hmac", + "hmac 0.10.1", "percent-encoding", "rand 0.8.5", "sha2 0.9.9", @@ -739,47 +760,49 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.7", + "generic-array", "typenum", ] [[package]] name = "crypto-mac" -version = "0.10.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ "generic-array 0.14.7", "subtle", ] [[package]] -name = "ctr" -version = "0.6.0" +name = "crypto-mac" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" +checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" dependencies = [ - "cipher", + "generic-array", + "subtle", ] [[package]] -name = "curve25519-dalek" -version = "2.0.0" +name = "ctr" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" +checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "serde", - "subtle", - "zeroize", + "cipher", ] [[package]] @@ -794,6 +817,7 @@ dependencies = [ "digest 0.10.7", "fiat-crypto", "rustc_version 0.4.0", + "serde", "subtle", "zeroize", ] @@ -834,22 +858,13 @@ dependencies = [ "powerfmt", ] -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -906,7 +921,7 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.3", + "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", @@ -1193,15 +1208,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1382,7 +1388,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51ab2f639c231793c5f6114bdb9bbe50a7dbbfcd7c7c6bd8475dec2d991e964f" dependencies = [ "digest 0.9.0", - "hmac", + "hmac 0.10.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", ] [[package]] @@ -1391,10 +1407,21 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" dependencies = [ - "crypto-mac", + "crypto-mac 0.10.0", "digest 0.9.0", ] +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.7", + "hmac 0.8.1", +] + [[package]] name = "http" version = "0.2.11" @@ -1502,7 +1529,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -1737,6 +1764,54 @@ dependencies = [ "redox_syscall 0.4.1", ] +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64 0.13.1", + "digest 0.9.0", + "hmac-drbg", 
+ "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "libsigner" version = "0.0.1" @@ -2270,6 +2345,26 @@ dependencies = [ "thiserror", ] +[[package]] +name = "proptest" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.4.2", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift", + "regex-syntax 0.8.2", + "rusty-fork", + "tempfile", + "unarray", +] + [[package]] name = "protobuf" version = "2.28.0" @@ -2285,6 +2380,12 @@ dependencies = [ "cc", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.35" @@ -2365,6 +2466,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "redox_syscall" version = "0.4.1" @@ -2665,6 +2775,18 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.16" @@ -3043,12 +3165,14 @@ name = "stacks-common" version = "0.0.1" dependencies = [ "chrono", - "curve25519-dalek 2.0.0", + "curve25519-dalek", "ed25519-dalek", "hashbrown 0.15.2", "lazy_static", "libc", + "libsecp256k1", "nix", + "proptest", "rand 0.8.5", "rand_core 0.6.4", "ripemd", @@ -3152,7 +3276,6 @@ dependencies = [ "assert-json-diff 1.1.0", "chrono", "clarity", - "curve25519-dalek 2.0.0", "ed25519-dalek", "hashbrown 0.15.2", "integer-sqrt", @@ -3672,6 +3795,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" version = "2.7.0" @@ 
-3708,7 +3837,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ - "generic-array 0.14.7", + "generic-array", "subtle", ] @@ -3766,6 +3895,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + [[package]] name = "waker-fn" version = "1.1.1" diff --git a/Dockerfile b/Dockerfile index 83270f3997a..ca03fa3ac60 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,17 +5,12 @@ ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' WORKDIR /src - COPY . . - RUN mkdir /out - +RUN rustup toolchain install stable RUN cargo build --features monitoring_prom,slog_json --release - -RUN cp target/release/stacks-node /out +RUN cp -R target/release/. /out FROM debian:bookworm-slim - -COPY --from=build /out/ /bin/ - +COPY --from=build /out/stacks-node /out/stacks-signer /out/stacks-inspect /bin/ CMD ["stacks-node", "mainnet"] diff --git a/README.md b/README.md index 5e0aa26dbea..9f8f38c4d13 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,8 @@ _Warning, this typically takes a few minutes_ cargo nextest run ``` +_On Windows, many tests will fail, mainly due to parallelism. To mitigate the issue you may need to run the tests individually._ + ## Run the testnet You can observe the state machine in action locally by running: @@ -85,8 +87,6 @@ You can observe the state machine in action locally by running: cargo run --bin stacks-node -- start --config ./sample/conf/testnet-follower-conf.toml ``` -_On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. 
This is due to the Clarity language currently being sensitive to line endings._ Additional testnet documentation is available [here](./docs/testnet.md) and [here](https://docs.stacks.co/docs/nodes-and-miners/miner-testnet) ## Release Process diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 284e856e498..9077834a703 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -27,11 +27,11 @@ regex = "1" lazy_static = "1.4.0" integer-sqrt = "0.1.3" slog = { version = "2.5.2", features = [ "max_level_trace" ] } -stacks_common = { package = "stacks-common", path = "../stacks-common", optional = true, default-features = false } +stacks_common = { package = "stacks-common", path = "../stacks-common", default-features = false } rstest = "0.17.0" rstest_reuse = "0.5.0" hashbrown = { workspace = true } -rusqlite = { workspace = true, optional = true} +rusqlite = { workspace = true, optional = true } [dependencies.serde_json] version = "1.0" @@ -49,11 +49,11 @@ mutants = "0.0.3" # criterion = "0.3" [features] -default = ["canonical"] -canonical = ["rusqlite", "stacks_common/canonical"] +default = ["rusqlite"] developer-mode = ["stacks_common/developer-mode"] slog_json = ["stacks_common/slog_json"] -testing = ["canonical"] +rusqlite = ["stacks_common/rusqlite", "dep:rusqlite"] +testing = [] devtools = [] rollback_value_check = [] disable-costs = [] diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index 5c3f68c7f95..6cd03afc3cf 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -194,6 +194,9 @@ pub enum CheckErrors { WriteAttemptedInReadOnly, AtBlockClosureMustBeReadOnly, + + // time checker errors + ExecutionTimeExpired, } #[derive(Debug, PartialEq)] @@ -277,6 +280,7 @@ impl From<CostErrors> for CheckErrors { CheckErrors::Expects("Unexpected interpreter failure in cost computation".into()) } CostErrors::Expect(s) => CheckErrors::Expects(s), + CostErrors::ExecutionTimeExpired => CheckErrors::ExecutionTimeExpired, } } } @@ -466,6 +470,7 @@ impl DiagnosableError for CheckErrors { CheckErrors::UncheckedIntermediaryResponses => "intermediary responses in consecutive statements must be checked".into(), CheckErrors::CostComputationFailed(s) => format!("contract cost computation failed: {}", s), CheckErrors::CouldNotDetermineSerializationType => "could not determine the input type for the serialization function".into(), + CheckErrors::ExecutionTimeExpired => "execution time expired".into(), } } diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 8dde917df9f..11e42ad9ae1 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -36,7 +36,7 @@ use self::type_checker::v2_1::TypeChecker as TypeChecker2_1; pub use self::types::{AnalysisPass, ContractAnalysis}; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::costs::LimitedCostTracker; -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] use crate::vm::database::MemoryBackingStore; use crate::vm::database::STORE_CONTRACT_SRC_INTERFACE; use crate::vm::representations::SymbolicExpression; @@ -44,7 +44,7 @@ use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; use crate::vm::ClarityVersion; /// Used by CLI tools like the docs generator.
Not used in production -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] pub fn mem_type_check( snippet: &str, version: ClarityVersion, diff --git a/clarity/src/vm/ast/errors.rs b/clarity/src/vm/ast/errors.rs index 6c668bacc1d..278846fc505 100644 --- a/clarity/src/vm/ast/errors.rs +++ b/clarity/src/vm/ast/errors.rs @@ -92,6 +92,8 @@ pub enum ParseErrors { UnexpectedParserFailure, /// Should be an unreachable failure which invalidates the transaction InterpreterFailure, + + ExecutionTimeExpired, } #[derive(Debug, PartialEq)] @@ -173,6 +175,7 @@ impl From<CostErrors> for ParseError { CostErrors::InterpreterFailure | CostErrors::Expect(_) => { ParseError::new(ParseErrors::InterpreterFailure) } + CostErrors::ExecutionTimeExpired => ParseError::new(ParseErrors::ExecutionTimeExpired), } } } @@ -299,6 +302,7 @@ impl DiagnosableError for ParseErrors { ParseErrors::NoteToMatchThis(token) => format!("to match this '{}'", token), ParseErrors::UnexpectedParserFailure => "unexpected failure while parsing".to_string(), ParseErrors::InterpreterFailure => "unexpected failure while parsing".to_string(), + ParseErrors::ExecutionTimeExpired => "max execution time expired".to_string(), } } diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 1e503d14254..8c5ac144c43 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -61,6 +61,9 @@ impl From<CheckError> for Error { CheckErrors::MemoryBalanceExceeded(_a, _b) => { Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value()) } + CheckErrors::ExecutionTimeExpired => { + Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value()) + } _ => Error::Analysis(e), } } @@ -75,6 +78,9 @@ impl From<InterpreterError> for Error { InterpreterError::Unchecked(CheckErrors::CostOverflow) => { Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value()) } + InterpreterError::Unchecked(CheckErrors::ExecutionTimeExpired) => { + Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value()) + } _ => Error::Interpreter(e), } } @@ -90,6 +96,9 @@ impl From<ParseError> for Error { ParseErrors::MemoryBalanceExceeded(_a, _b) => { Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value()) } + ParseErrors::ExecutionTimeExpired => { + Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value()) + } _ => Error::Parse(e), } } @@ -284,6 +293,7 @@ pub trait TransactionConnection: ClarityConnection { /// abort_call_back is called with an AssetMap and a ClarityDatabase reference, /// if abort_call_back returns true, all modifications from this transaction will be rolled back. /// otherwise, they will be committed (though they may later be rolled back if the block itself is rolled back).
+ #[allow(clippy::too_many_arguments)] fn run_contract_call( &mut self, sender: &PrincipalData, @@ -292,6 +302,7 @@ pub trait TransactionConnection: ClarityConnection { public_function: &str, args: &[Value], abort_call_back: F, + max_execution_time: Option<Duration>, ) -> Result<(Value, AssetMap, Vec<StacksTransactionEvent>), Error> where F: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool, @@ -303,6 +314,11 @@ pub trait TransactionConnection: ClarityConnection { self.with_abort_callback( |vm_env| { + if let Some(max_execution_time_duration) = max_execution_time { + vm_env + .context + .set_max_execution_time(max_execution_time_duration); + } vm_env .execute_transaction( sender.clone(), @@ -329,6 +345,7 @@ pub trait TransactionConnection: ClarityConnection { /// abort_call_back is called with an AssetMap and a ClarityDatabase reference, /// if abort_call_back returns true, all modifications from this transaction will be rolled back. /// otherwise, they will be committed (though they may later be rolled back if the block itself is rolled back). + #[allow(clippy::too_many_arguments)] fn initialize_smart_contract( &mut self, identifier: &QualifiedContractIdentifier, @@ -337,12 +354,18 @@ pub trait TransactionConnection: ClarityConnection { contract_str: &str, sponsor: Option<PrincipalData>, abort_call_back: F, + max_execution_time: Option<Duration>, ) -> Result<(AssetMap, Vec<StacksTransactionEvent>), Error> where F: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool, { let (_, asset_map, events, aborted) = self.with_abort_callback( |vm_env| { + if let Some(max_execution_time_duration) = max_execution_time { + vm_env + .context + .set_max_execution_time(max_execution_time_duration); + } vm_env .initialize_contract_from_ast( identifier.clone(), diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 87c9d56de1c..3b98c4b828a 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -17,6 +17,7 @@ use std::collections::BTreeMap; use std::fmt; use std::mem::replace; +use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; use serde::Serialize; @@ -181,6 +182,17 @@ pub struct EventBatch { pub events: Vec<StacksTransactionEvent>, } +/** ExecutionTimeTracker keeps track of how much time a contract call is taking. It is checked at every eval call. +*/ +pub enum ExecutionTimeTracker { + NoTracking, + MaxTime { + start_time: Instant, + max_duration: Duration, + }, +} + /** GlobalContext represents the outermost context for a single transaction's execution. It tracks an asset changes that occurred during the processing of the transaction, whether or not the current context is read_only, @@ -199,6 +211,7 @@ pub struct GlobalContext<'a, 'hooks> { /// This is the chain ID of the transaction pub chain_id: u32, pub eval_hooks: Option<Vec<&'hooks mut dyn EvalHook>>, + pub execution_time_tracker: ExecutionTimeTracker, } #[derive(Serialize, Deserialize, Clone)] @@ -799,6 +812,11 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { self.context.cost_track.get_total() } + #[cfg(any(test, feature = "testing"))] + pub fn mut_cost_tracker(&mut self) -> &mut LimitedCostTracker { + &mut self.context.cost_track + } + /// Destroys this environment, returning ownership of its database reference. /// If the context wasn't top-level (i.e., it had uncommitted data), return None, /// because the database is not guaranteed to be in a sane state.
@@ -1548,6 +1566,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { epoch_id, chain_id, eval_hooks: None, + execution_time_tracker: ExecutionTimeTracker::NoTracking, } } @@ -1555,6 +1574,13 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { self.asset_maps.is_empty() } + pub fn set_max_execution_time(&mut self, max_execution_time: Duration) { + self.execution_time_tracker = ExecutionTimeTracker::MaxTime { + start_time: Instant::now(), + max_duration: max_execution_time, + } + } + fn get_asset_map(&mut self) -> Result<&mut AssetMap> { self.asset_maps .last_mut() diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 65a0377cdf3..0b1559795f6 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -38,7 +38,7 @@ use crate::vm::types::Value::UInt; use crate::vm::types::{ FunctionType, PrincipalData, QualifiedContractIdentifier, TupleData, TypeSignature, }; -use crate::vm::{eval_all, ClarityName, SymbolicExpression, Value}; +use crate::vm::{CallStack, ClarityName, Environment, LocalContext, SymbolicExpression, Value}; pub mod constants; pub mod cost_functions; @@ -331,7 +331,7 @@ pub struct TrackerData { /// if the cost tracker is non-free, this holds the StacksEpochId that should be used to evaluate /// the Clarity cost functions. If the tracker *is* free, then those functions do not need to be /// evaluated, so no epoch identifier is necessary. - epoch: StacksEpochId, + pub epoch: StacksEpochId, mainnet: bool, chain_id: u32, } @@ -414,6 +414,7 @@ pub enum CostErrors { CostContractLoadFailure, InterpreterFailure, Expect(String), + ExecutionTimeExpired, } impl CostErrors { @@ -1053,8 +1054,8 @@ pub fn parse_cost( // TODO: add tests from mutation testing results #4832 #[cfg_attr(test, mutants::skip)] -fn compute_cost( - cost_tracker: &mut TrackerData, +pub fn compute_cost( + cost_tracker: &TrackerData, cost_function_reference: ClarityCostFunctionReference, input_sizes: &[u64], eval_in_epoch: StacksEpochId, @@ -1073,7 +1074,7 @@ fn compute_cost( let cost_contract = cost_tracker .cost_contracts - .get_mut(&cost_function_reference.contract_id) + .get(&cost_function_reference.contract_id) .ok_or(CostErrors::CostComputationFailed(format!( "CostFunction not found: {cost_function_reference}" )))?; @@ -1088,14 +1089,23 @@ fn compute_cost( ))); } - let function_invocation = [SymbolicExpression::list(program)]; + let function_invocation = SymbolicExpression::list(program); + let eval_result = global_context.execute(|global_context| { + let context = LocalContext::new(); + let mut call_stack = CallStack::new(); + let publisher: PrincipalData = cost_contract.contract_identifier.issuer.clone().into(); + let mut env = Environment::new( + global_context, + cost_contract, + &mut call_stack, + Some(publisher.clone()), + Some(publisher.clone()), + None, + ); - let eval_result = eval_all( - &function_invocation, - cost_contract, - &mut global_context, - None, - ); + let result = super::eval(&function_invocation, &mut env, &context)?; + Ok(Some(result)) + }); parse_cost(&cost_function_reference.to_string(), eval_result) } diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index a37669f499b..36599d7eea4 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -14,14 +14,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] use rusqlite::Connection; use stacks_common::types::chainstate::{StacksBlockId, TrieHash}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; use crate::vm::contexts::GlobalContext; -#[cfg(feature = "canonical")] use crate::vm::database::{ ClarityDatabase, ClarityDeserializable, ClaritySerializable, NULL_BURN_STATE_DB, NULL_HEADER_DB, }; @@ -85,7 +84,7 @@ pub trait ClarityBackingStore { fn get_open_chain_tip_height(&mut self) -> u32; fn get_open_chain_tip(&mut self) -> StacksBlockId; - #[cfg(feature = "canonical")] + #[cfg(feature = "rusqlite")] fn get_side_store(&mut self) -> &Connection; fn get_cc_special_cases_handler(&self) -> Option { @@ -222,7 +221,7 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't retrieve data") } - #[cfg(feature = "canonical")] + #[cfg(feature = "rusqlite")] fn get_side_store(&mut self) -> &Connection { panic!("NullBackingStore has no side store") } diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs index 65236cd88a1..cee4cbe00cc 100644 --- a/clarity/src/vm/database/mod.rs +++ b/clarity/src/vm/database/mod.rs @@ -13,7 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] pub use sqlite::MemoryBackingStore; pub use self::clarity_db::{ @@ -22,7 +22,7 @@ pub use self::clarity_db::{ }; pub use self::clarity_store::{ClarityBackingStore, SpecialCaseHandler}; pub use self::key_value_wrapper::{RollbackWrapper, RollbackWrapperPersistedLog}; -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] pub use self::sqlite::SqliteConnection; pub use self::structures::{ ClarityDeserializable, ClaritySerializable, DataMapMetadata, DataVariableMetadata, @@ -32,6 +32,6 @@ pub use self::structures::{ pub mod clarity_db; pub mod clarity_store; mod key_value_wrapper; -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] pub mod sqlite; mod structures; diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs index 70c1b3ecb25..1acdda78d85 100644 --- a/clarity/src/vm/docs/contracts.rs +++ b/clarity/src/vm/docs/contracts.rs @@ -4,13 +4,13 @@ use hashbrown::{HashMap, HashSet}; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::StacksEpochId; -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] use crate::vm::analysis::mem_type_check; use crate::vm::analysis::ContractAnalysis; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::contexts::GlobalContext; use crate::vm::costs::LimitedCostTracker; -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] use crate::vm::database::MemoryBackingStore; use crate::vm::docs::{get_input_type_string, get_output_type_string, get_signature}; use crate::vm::types::{FunctionType, QualifiedContractIdentifier, Value}; @@ -63,7 +63,7 @@ fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) - } } -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] #[allow(clippy::expect_used)] fn get_constant_value(var_name: &str, contract_content: &str) -> Value { let to_eval = format!("{}\n{}", contract_content, var_name); @@ -72,7 +72,7 @@ fn get_constant_value(var_name: &str, contract_content: &str) -> Value { .expect("BUG: failed to return constant value") } -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] fn doc_execute(program: &str) -> 
Result<Option<Value>, vm::Error> { let contract_id = QualifiedContractIdentifier::transient(); let mut contract_context = ContractContext::new(contract_id.clone(), ClarityVersion::Clarity2); @@ -99,7 +99,7 @@ fn doc_execute(program: &str) -> Result<Option<Value>, vm::Error> { }) } -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] #[allow(clippy::expect_used)] pub fn make_docs( content: &str, @@ -185,7 +185,7 @@ pub fn make_docs( /// Produce a set of documents for multiple contracts, supplied as a list of `(contract_name, contract_content)` pairs, /// and a map from `contract_name` to corresponding `ContractSupportDocs` -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] pub fn produce_docs_refs<A: AsRef<str>, B: AsRef<str>>( contracts: &[(A, B)], support_docs: &HashMap<&str, ContractSupportDocs>, diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index a92b4fdfdb4..d47057b29cf 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -499,7 +499,6 @@ Note: Corner cases are handled with the following rules: * if both `i1` and `i2` are `0`, return `1` * if `i1` is `1`, return `1` * if `i1` is `0`, return `0` - * if `i2` is `1`, return `i1` * if `i2` is negative or greater than `u32::MAX`, throw a runtime error", example: "(pow 2 3) ;; Returns 8 (pow 2 2) ;; Returns 4 diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs index 5f2b93c1e5f..a3100dcd83a 100644 --- a/clarity/src/vm/errors.rs +++ b/clarity/src/vm/errors.rs @@ -16,7 +16,7 @@ use std::{error, fmt}; -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] use rusqlite::Error as SqliteError; use serde_json::Error as SerdeJSONErr; use stacks_common::types::chainstate::BlockHeaderHash; @@ -57,7 +57,7 @@ pub enum InterpreterError { UninitializedPersistedVariable, FailedToConstructAssetTable, FailedToConstructEventBatch, - #[cfg(feature = "canonical")] + #[cfg(feature = "rusqlite")] SqliteError(IncomparableError<SqliteError>), BadFileName, FailedToCreateDataDirectory, @@ -241,7 +241,7 @@ mod test { fn error_formats() { let t = "(/ 10 0)"; let expected = "DivisionByZero - Stack Trace: + Stack Trace: _native_:native_div "; diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 82c9b5a4db7..7f7f0d1541f 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -42,7 +42,7 @@ pub mod coverage; pub mod events; -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] pub mod tooling; #[cfg(any(test, feature = "testing"))] @@ -55,6 +55,7 @@ pub mod clarity; use std::collections::BTreeMap; +use costs::CostErrors; use serde_json; use stacks_common::types::StacksEpochId; @@ -63,10 +64,10 @@ use self::ast::ContractAST; use self::costs::ExecutionCost; use self::diagnostic::Diagnostic; use crate::vm::callables::CallableType; -use crate::vm::contexts::GlobalContext; pub use crate::vm::contexts::{ CallStack, ContractContext, Environment, LocalContext, MAX_CONTEXT_DEPTH, }; +use crate::vm::contexts::{ExecutionTimeTracker, GlobalContext}; use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::{ runtime_cost, CostOverflowingMath, CostTracker, LimitedCostTracker, MemoryConsumer, @@ -303,6 +304,22 @@ pub fn apply( } } +fn check_max_execution_time_expired(global_context: &GlobalContext) -> Result<()> { + match global_context.execution_time_tracker { + ExecutionTimeTracker::NoTracking => Ok(()), + ExecutionTimeTracker::MaxTime { + start_time, + max_duration, + } => { + if start_time.elapsed() >= max_duration { + Err(CostErrors::ExecutionTimeExpired.into()) + } else { + Ok(()) + } + } + } +} + pub fn
diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs
index ceeb7f9ddb5..59f08437545 100644
--- a/clarity/src/vm/tests/simple_apply_eval.rs
+++ b/clarity/src/vm/tests/simple_apply_eval.rs
@@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>.
+use std::time::Duration; + use rstest::rstest; use rstest_reuse::{self, *}; use stacks_common::address::{ @@ -37,8 +39,10 @@ use crate::vm::types::{ StacksAddressExtensions, TypeSignature, }; use crate::vm::{ - eval, execute as vm_execute, execute_v2 as vm_execute_v2, execute_with_parameters, CallStack, - ClarityVersion, ContractContext, Environment, GlobalContext, LocalContext, Value, + eval, execute as vm_execute, execute_v2 as vm_execute_v2, + execute_with_limited_execution_time as vm_execute_with_limited_execution_time, + execute_with_parameters, CallStack, ClarityVersion, ContractContext, CostErrors, Environment, + GlobalContext, LocalContext, Value, }; #[test] @@ -1763,3 +1767,13 @@ fn test_chain_id() { ) }); } + +#[test] +fn test_execution_time_expiration() { + assert_eq!( + vm_execute_with_limited_execution_time("(+ 1 1)", Duration::from_secs(0)) + .err() + .unwrap(), + CostErrors::ExecutionTimeExpired.into() + ); +} diff --git a/contrib/nix/README.md b/contrib/nix/README.md new file mode 100644 index 00000000000..72b996e068f --- /dev/null +++ b/contrib/nix/README.md @@ -0,0 +1,21 @@ +# `nix` flake + +Build `stacks-node` and `stacks-signer` by pointing to the `flake.nix` file in +this directory. For instance, from the root directory: `nix build +'./contrib/nix'`. + +## Installing `nix` + +Follow the [official documentation](https://nix.dev/install-nix) or use the +[Determinate Nix Installer](https://github.com/DeterminateSystems/nix-installer). + +## Using `direnv` + +If using `direnv`, from the root directory of this repository: + +```bash +echo "use flake ./contrib/nix/" > .envrc +direnv allow +``` + +This will provide a `sh` environment with required dependencies (e.g., `bitcoind`) available. diff --git a/contrib/nix/flake.lock b/contrib/nix/flake.lock new file mode 100644 index 00000000000..69951ab2969 --- /dev/null +++ b/contrib/nix/flake.lock @@ -0,0 +1,101 @@ +{ + "nodes": { + "crane": { + "locked": { + "lastModified": 1739936662, + "narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=", + "owner": "ipetkov", + "repo": "crane", + "rev": "19de14aaeb869287647d9461cbd389187d8ecdb7", + "type": "github" + }, + "original": { + "owner": "ipetkov", + "repo": "crane", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": [ + "systems" + ] + }, + "locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1740547748, + "narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "3a05eebede89661660945da1f151959900903b6a", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "crane": "crane", + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs", + "rust-overlay": "rust-overlay", + "systems": "systems" + } + }, + "rust-overlay": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1740709839, + "narHash": "sha256-4dF++MXIXna/AwlZWDKr7bgUmY4xoEwvkF1GewjNrt0=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "b4270835bf43c6f80285adac6f66a26d83f0f277", + "type": "github" + }, + "original": { + "owner": "oxalica", + "repo": "rust-overlay", + 
"type": "github" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/contrib/nix/flake.nix b/contrib/nix/flake.nix new file mode 100644 index 00000000000..afd88e8a61f --- /dev/null +++ b/contrib/nix/flake.nix @@ -0,0 +1,190 @@ +{ + description = "stacks-core"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + systems.url = "github:nix-systems/default"; + + flake-utils = { + url = "github:numtide/flake-utils"; + inputs.systems.follows = "systems"; + }; + + rust-overlay = { + url = "github:oxalica/rust-overlay"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + + crane = { + url = "github:ipetkov/crane"; + }; + + }; + + outputs = + { + nixpkgs, + flake-utils, + rust-overlay, + crane, + ... + }: + flake-utils.lib.eachDefaultSystem ( + system: + let + overlays = [ (import rust-overlay) ]; + pkgs = import nixpkgs { + inherit system overlays; + }; + + inherit (pkgs) lib; + + toolchain = pkgs.rust-bin.fromRustupToolchainFile ../../rust-toolchain; + craneLib = (crane.mkLib pkgs).overrideToolchain toolchain; + + name = "stacks-core"; + + versions = (builtins.fromTOML (builtins.readFile ../../versions.toml)); + version = versions.stacks_node_version; + + # Common arguments can be set here to avoid repeating them later + commonArgs = { + strictDeps = true; + + buildInputs = + [ + # Add additional build inputs here + ] + ++ lib.optionals pkgs.stdenv.isDarwin [ + # Darwin specific inputs + pkgs.darwin.apple_sdk.frameworks.SystemConfiguration + ]; + }; + + # Build *just* the cargo dependencies, so we can reuse + # all of that work (e.g. via cachix) when running in CI + cargoArtifacts = craneLib.buildDepsOnly ( + commonArgs + // { + inherit version; + pname = name; + src = fileSetForCrate ../..; + } + ); + + individualCrateArgs = commonArgs // { + inherit cargoArtifacts; + + # NB: we disable tests since we'll run them all via cargo-nextest + doCheck = false; + }; + + # TODO: Return minimum fileSets per each crate + fileSetForCrate = + crate: + lib.fileset.toSource { + root = ../..; + fileset = lib.fileset.unions [ + ../../Cargo.toml + ../../Cargo.lock + # + ../../versions.toml + # + ../../stx-genesis/name_zonefiles.txt + ../../stx-genesis/name_zonefiles.txt.sha256 + ../../stx-genesis/name_zonefiles-test.txt + ../../stx-genesis/name_zonefiles-test.txt.sha256 + ../../stx-genesis/chainstate.txt + ../../stx-genesis/chainstate.txt.sha256 + ../../stx-genesis/chainstate-test.txt + ../../stx-genesis/chainstate-test.txt.sha256 + # + (craneLib.fileset.commonCargoSources crate) + # + (lib.fileset.fileFilter (file: file.hasExt "clar") ../..) 
+ # + (craneLib.fileset.commonCargoSources ../../clarity) + (craneLib.fileset.commonCargoSources ../../contrib/tools/relay-server) + (craneLib.fileset.commonCargoSources ../../libsigner) + (craneLib.fileset.commonCargoSources ../../libstackerdb) + (craneLib.fileset.commonCargoSources ../../pox-locking) + (craneLib.fileset.commonCargoSources ../../stacks-common) + (craneLib.fileset.commonCargoSources ../../stackslib) + (craneLib.fileset.commonCargoSources ../../stx-genesis) + (craneLib.fileset.commonCargoSources ../../testnet/stacks-node) + ]; + }; + + stacks-signer = craneLib.buildPackage ( + individualCrateArgs + // rec { + version = versions.stacks_signer_version; + pname = "stacks-signer"; + cargoFeatures = "--features monitoring_prom"; + cargoExtraArgs = "${cargoFeatures} -p ${pname}"; + src = fileSetForCrate ../../stacks-signer; + } + ); + + # Build the actual crate itself, reusing the dependency + # artifacts from above. + stacks-core = craneLib.buildPackage ( + commonArgs + // rec { + inherit version cargoArtifacts; + doCheck = false; + pname = name; + cargoFeatures = "--features monitoring_prom,slog_json"; + cargoExtraArgs = "${cargoFeatures}"; + src = fileSetForCrate ../..; + } + ); + in + with pkgs; + { + packages = { + inherit stacks-signer; + default = stacks-core; + }; + + apps = rec { + stacks-node = { + type = "app"; + program = "${stacks-core}/bin/stacks-node"; + }; + stacks-signer = { + type = "app"; + program = "${stacks-signer}/bin/stacks-signer"; + }; + default = stacks-node; + }; + + checks = { + inherit stacks-core; + }; + + devShells.default = craneLib.devShell { + RUSTFMT = "${toolchain}/bin/rustfmt"; + GREETING = "Welcome, stacks-core developer!"; + shellHook = '' + echo $GREETING + + echo "Setting a few options that will help you when running tests:" + set -x + ulimit -n 10240 + set +x + ''; + + packages = + [ + rust-analyzer + bitcoind + ] + ++ lib.optionals pkgs.stdenv.isDarwin [ + pkgs.darwin.apple_sdk.frameworks.SystemConfiguration + pkgs.darwin.apple_sdk.frameworks.CoreServices + ]; + }; + } + ); +} diff --git a/contrib/tools/block-replay.sh b/contrib/tools/block-replay.sh index 4fb4a6a55b9..0b6259c73dd 100755 --- a/contrib/tools/block-replay.sh +++ b/contrib/tools/block-replay.sh @@ -16,18 +16,19 @@ set -o pipefail ## for 20 slices, this is about 1.8TB NETWORK="mainnet" ## network to replay -REPO_DIR="$HOME/stacks-inspect" ## where to build the source +REPO_DIR="$HOME/stacks-core" ## where to build the source REMOTE_REPO="stacks-network/stacks-core" ## remote git repo to build stacks-inspect from SCRATCH_DIR="$HOME/scratch" ## root folder for the replay slices TIMESTAMP=$(date +%Y-%m-%d-%s) ## use a simple date format year-month-day-epoch -LOG_DIR="/tmp/replay_${TIMESTAMP}" ## location of logfiles for the replay +LOG_DIR="$HOME/replay_${TIMESTAMP}" ## location of logfiles for the replay SLICE_DIR="${SCRATCH_DIR}/slice" ## location of slice dirs TMUX_SESSION="replay" ## tmux session name to run the replay TERM_OUT=false ## terminal friendly output TESTING=false ## only run a replay on a few thousand blocks BRANCH="develop" ## default branch to build stacks-inspect from CORES=$(grep -c processor /proc/cpuinfo) ## retrieve total number of CORES on the system -RESERVED=10 ## reserve this many CORES for other processes as default +RESERVED=8 ## reserve this many CORES for other processes as default +LOCAL_CHAINSTATE= ## path to local chainstate to use instead of snapshot download ## ansi color codes for terminal output COLRED=$'\033[31m' ## Red @@ -39,427 +40,482 
@@ COLRESET=$'\033[0m' ## reset color/formatting ## verify that cargo is installed in the expected path, not only $PATH install_cargo() { - command -v "$HOME/.cargo/bin/cargo" >/dev/null 2>&1 || { - echo "Installing Rust via rustup" - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y || { - echo "${COLRED}Error${COLRESET} installing Rust" - exit 1 - } - } - echo "Exporting $HOME/.cargo/env" - # shellcheck source=/dev/null - source "$HOME/.cargo/env" - return 0 + command -v "$HOME/.cargo/bin/cargo" >/dev/null 2>&1 || { + echo "Installing Rust via rustup" + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y || { + echo "${COLRED}Error${COLRESET} installing Rust" + exit 1 + } + } + echo "Exporting $HOME/.cargo/env" + # shellcheck source=/dev/null + source "$HOME/.cargo/env" + return 0 } ## build stacks-inspect binary from specified repo/branch build_stacks_inspect() { - if [ -d "${REPO_DIR}" ];then - echo "Found ${COLYELLOW}${REPO_DIR}${COLRESET}. checking out ${COLGREEN}${BRANCH}${COLRESET} and resetting to ${COLBOLD}HEAD${COLRESET}" - cd "${REPO_DIR}" && git fetch - echo "Checking out ${BRANCH} and resetting to HEAD" - git stash ## stash any local changes to prevent checking out $BRANCH - (git checkout "${BRANCH}" && git reset --hard HEAD) || { - echo "${COLRED}Error${COLRESET} checking out ${BRANCH}" - exit 1 - } - else - echo "Cloning stacks-core ${BRANCH}" - (git clone "https://github.com/${REMOTE_REPO}" --branch "${BRANCH}" "${REPO_DIR}" && cd "${REPO_DIR}") || { - echo "${COLRED}Error${COLRESET} cloning https://github.com/${REMOTE_REPO} into ${REPO_DIR}" - exit 1 - } - fi - git pull - ## build stacks-inspect to: $HOME/stacks-inspect/target/release/stacks-inspect - echo "Building stacks-inspect binary" - cargo build --bin=stacks-inspect --release || { - echo "${COLRED}Error${COLRESET} building stacks-inspect binary" - exit 1 - } - echo "Done building. continuing" + if [ -d "${REPO_DIR}" ];then + echo "Found ${COLYELLOW}${REPO_DIR}${COLRESET}. checking out ${COLGREEN}${BRANCH}${COLRESET} and resetting to ${COLBOLD}HEAD${COLRESET}" + cd "${REPO_DIR}" && git fetch + echo "Checking out ${BRANCH} and resetting to HEAD" + git stash ## stash any local changes to prevent checking out $BRANCH + (git checkout "${BRANCH}" && git reset --hard HEAD) || { + echo "${COLRED}Error${COLRESET} checking out ${BRANCH}" + exit 1 + } + else + echo "Cloning stacks-core ${BRANCH}" + (git clone "https://github.com/${REMOTE_REPO}" --branch "${BRANCH}" "${REPO_DIR}" && cd "${REPO_DIR}") || { + echo "${COLRED}Error${COLRESET} cloning https://github.com/${REMOTE_REPO} into ${REPO_DIR}" + exit 1 + } + fi + git pull + ## build stacks-inspect to: $HOME/stacks-inspect/target/release/stacks-inspect + echo "Building stacks-inspect binary" + cargo build --bin=stacks-inspect --release || { + echo "${COLRED}Error${COLRESET} building stacks-inspect binary" + exit 1 + } + echo "Done building. 
continuing" } ## create the slice dirs from an chainstate archive (symlinking marf.sqlite.blobs), 1 dir per CPU configure_replay_slices() { - if [ -d "$HOME/scratch" ]; then - echo "Deleting existing scratch dir: ${COLYELLOW}$HOME/scratch${COLRESET}" - rm -rf "${HOME}/scratch" || { - echo "${COLRED}Error${COLRESET} deleting dir $HOME/scratch" - exit 1 - } - fi - echo "Creating scratch and slice dirs" - (mkdir -p "${SLICE_DIR}0" && cd "${SCRATCH_DIR}") || { - echo "${COLRED}Error${COLRESET} creating dir ${SLICE_DIR}" - exit 1 - } - echo "Downloading latest ${NETWORK} chainstate archive ${COLYELLOW}https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz${COLRESET}" - ## curl had some random issues retrying the download when network issues arose. wget has resumed more consistently, so we'll use that binary - # curl -L --proto '=https' --tlsv1.2 https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz -o ${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz || { - wget -O "${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz" "https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz" || { - echo "${COLRED}Error${COLRESET} downlaoding latest ${NETWORK} chainstate archive" - exit 1 - } - ## extract downloaded archive - echo "Extracting downloaded archive: ${COLYELLOW}${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz${COLRESET}" - tar --strip-components=1 -xzf "${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz" -C "${SLICE_DIR}0" || { - echo "${COLRED}Error${COLRESET} extracting ${NETWORK} chainstate archive" - exit - } - echo "Moving marf database: ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs -> ${COLYELLOW}${SCRATCH_DIR}/marf.sqlite.blobs${COLRESET}" - mv "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs "${SCRATCH_DIR}"/ - echo "Symlinking marf database: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${COLYELLOW}${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs${COLRESET}" - ln -s "${SCRATCH_DIR}"/marf.sqlite.blobs "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs || { - echo "${COLRED}Error${COLRESET} creating symlink: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs" - exit 1 - } + if [ -d "$HOME/scratch" ]; then + echo "Deleting existing scratch dir: ${COLYELLOW}$HOME/scratch${COLRESET}" + rm -rf "${HOME}/scratch" || { + echo "${COLRED}Error${COLRESET} deleting dir $HOME/scratch" + exit 1 + } + fi + echo "Creating scratch and slice dirs" + (mkdir -p "${SLICE_DIR}0" && cd "${SCRATCH_DIR}") || { + echo "${COLRED}Error${COLRESET} creating dir ${SLICE_DIR}" + exit 1 + } - ## create a copy of the linked db with - ## decrement by 1 since we already have ${SLICE_DIR}0 - for ((i=1;i<=$(( CORES - RESERVED - 1));i++)); do - echo "Copying ${SLICE_DIR}0 -> ${COLYELLOW}${SLICE_DIR}${i}${COLRESET}" - cp -R "${SLICE_DIR}0" "${SLICE_DIR}${i}" || { - echo "${COLRED}Error${COLRESET} copying ${SLICE_DIR}0 -> ${SLICE_DIR}${i}" - exit 1 - } - done + if [[ -n "${LOCAL_CHAINSTATE}" ]]; then + echo "Copying local chainstate '${LOCAL_CHAINSTATE}'" + cp -r "${LOCAL_CHAINSTATE}"/* "${SLICE_DIR}0" + else + echo "Downloading latest ${NETWORK} chainstate archive ${COLYELLOW}https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz${COLRESET}" + ## curl had some random issues retrying the download when network issues arose. 
wget has resumed more consistently, so we'll use that binary + # curl -L --proto '=https' --tlsv1.2 https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz -o ${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz || { + wget -O "${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz" "https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz" || { + echo "${COLRED}Error${COLRESET} downloading latest ${NETWORK} chainstate archive" + exit 1 + } + ## extract downloaded archive + echo "Extracting downloaded archive: ${COLYELLOW}${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz${COLRESET}" + tar --strip-components=1 -xzf "${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz" -C "${SLICE_DIR}0" || { + echo "${COLRED}Error${COLRESET} extracting ${NETWORK} chainstate archive" + exit + } + fi + echo "Moving marf database: ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs -> ${COLYELLOW}${SCRATCH_DIR}/marf.sqlite.blobs${COLRESET}" + mv "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs "${SCRATCH_DIR}"/ + echo "Symlinking marf database: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${COLYELLOW}${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs${COLRESET}" + ln -s "${SCRATCH_DIR}"/marf.sqlite.blobs "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs || { + echo "${COLRED}Error${COLRESET} creating symlink: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs" + exit 1 + } + + ## create a copy of the linked db with + ## decrement by 1 since we already have ${SLICE_DIR}0 + for ((i=1;i<=$(( CORES - RESERVED - 1));i++)); do + echo "Copying ${SLICE_DIR}0 -> ${COLYELLOW}${SLICE_DIR}${i}${COLRESET}" + cp -R "${SLICE_DIR}0" "${SLICE_DIR}${i}" || { + echo "${COLRED}Error${COLRESET} copying ${SLICE_DIR}0 -> ${SLICE_DIR}${i}" + exit 1 + } + done } ## setup the tmux sessions and create the logdir for storing output setup_replay() { - ## if there is an existing folder, rm it - if [ -d "${LOG_DIR}" ];then - echo "Removing logdir ${LOG_DIR}" - rm -rf "${LOG_DIR}" - fi - ## create LOG_DIR to store output files - if [ ! 
-d "${LOG_DIR}" ]; then + echo "Creating logdir ${LOG_DIR}" + mkdir -p "${LOG_DIR}" + fi + ## if tmux session "replay" exists, kill it and start anew + if eval "tmux list-windows -t ${TMUX_SESSION} &> /dev/null"; then + echo "Killing existing tmux session: ${TMUX_SESSION}" + eval "tmux kill-session -t ${TMUX_SESSION} &> /dev/null" + fi + local slice_counter=0 - ## create tmux session named ${TMUX_SESSION} with a window named slice0 - tmux new-session -d -s ${TMUX_SESSION} -n slice${slice_counter} || { - echo "${COLRED}Error${COLRESET} creating tmux session ${COLYELLOW}${TMUX_SESSION}${COLRESET}" - exit 1 - } + ## create tmux session named ${TMUX_SESSION} with a window named slice0 + tmux new-session -d -s ${TMUX_SESSION} -n slice${slice_counter} || { + echo "${COLRED}Error${COLRESET} creating tmux session ${COLYELLOW}${TMUX_SESSION}${COLRESET}" + exit 1 + } - if [ ! -f "${SLICE_DIR}0/chainstate/vm/index.sqlite" ]; then - echo "${COLRED}Error${COLRESET}: chainstate db not found (${SLICE_DIR}0/chainstate/vm/index.sqlite)" - exit 1 - fi - return 0 + if [ ! -f "${SLICE_DIR}0/chainstate/vm/index.sqlite" ]; then + echo "${COLRED}Error${COLRESET}: chainstate db not found (${SLICE_DIR}0/chainstate/vm/index.sqlite)" + exit 1 + fi + return 0 } ## run the block replay start_replay() { - local mode=$1 - local total_blocks=0 - local starting_block=0 - local inspect_command - local slice_counter=0 - case "$mode" in - nakamoto) - ## nakamoto blocks - echo "Mode: ${COLYELLOW}${mode}${COLRESET}" - local log_append="_${mode}" - inspect_command="replay-naka-block" - ## get the total number of nakamoto blocks in db - total_blocks=$(echo "select count(*) from nakamoto_block_headers" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) - starting_block=0 # for the block counter, start at this block - ## use these values if `--testing` arg is provided (only replay 1_000 blocks) - ${TESTING} && total_blocks=301883 - ${TESTING} && starting_block=300883 - ;; - *) - ## pre-nakamoto blocks - echo "Mode: ${COLYELLOW}pre-nakamoto${COLRESET}" - local log_append="" - inspect_command="replay-block" - ## get the total number of blocks (with orphans) in db - total_blocks=$(echo "select count(*) from staging_blocks where orphaned = 0" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) - starting_block=0 # for the block counter, start at this block - ## use these values if `--testing` arg is provided (only replay 1_000 blocks) Note: 2.5 epoch is at 153106 - ${TESTING} && total_blocks=153000 - ${TESTING} && starting_block=15200 - ;; - esac - local block_diff=$((total_blocks - starting_block)) ## how many blocks are being replayed - local slices=$((CORES - RESERVED)) ## how many replay slices to use - local slice_blocks=$((block_diff / slices)) ## how many blocks to replay per slice - ${TESTING} && echo "${COLRED}Testing: ${TESTING}${COLRESET}" - echo "Total blocks: ${COLYELLOW}${total_blocks}${COLRESET}" - echo "Staring Block: ${COLYELLOW}$starting_block${COLRESET}" - echo "Block diff: ${COLYELLOW}$block_diff${COLRESET}" - echo "******************************************************" - echo "Total slices: ${COLYELLOW}${slices}${COLRESET}" - echo "Blocks per slice: ${COLYELLOW}${slice_blocks}${COLRESET}" - local end_block_count=$starting_block - while [[ ${end_block_count} -lt ${total_blocks} ]]; do - local start_block_count=$end_block_count - end_block_count=$((end_block_count + slice_blocks)) - if [[ "${end_block_count}" -gt "${total_blocks}" ]] || [[ "${slice_counter}" -eq $((slices - 1)) ]]; then - 
end_block_count="${total_blocks}" - fi - if [ "${mode}" != "nakamoto" ]; then ## don't create the tmux windows if we're replaying nakamoto blocks (they should already exist). TODO: check if it does exist in case the function call order changes - if [ "${slice_counter}" -gt 0 ];then - tmux new-window -t replay -d -n "slice${slice_counter}" || { - echo "${COLRED}Error${COLRESET} creating tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" - exit 1 - } - fi - fi - local log_file="${LOG_DIR}/slice${slice_counter}${log_append}.log" - local log=" | tee -a ${log_file}" - local cmd="${REPO_DIR}/target/release/stacks-inspect --config ${REPO_DIR}/stackslib/conf/${NETWORK}-follower-conf.toml ${inspect_command} ${SLICE_DIR}${slice_counter} index-range $start_block_count $end_block_count 2>/dev/null" - echo " Creating tmux window: ${COLGREEN}replay:slice${slice_counter}${COLRESET} :: Blocks: ${COLYELLOW}${start_block_count}-${end_block_count}${COLRESET} || Logging to: ${log_file}" - echo "Command: ${cmd}" > "${log_file}" ## log the command being run for the slice - echo "Replaying indexed blocks: ${start_block_count}-${end_block_count} (out of ${total_blocks})" >> "${log_file}" - ## send `cmd` to the tmux window where the replay will run - tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "${cmd}${log}" Enter || { - echo "${COLRED}Error${COLRESET} sending replay command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" - exit 1 - } - ## log the return code as the last line - tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "echo \${PIPESTATUS[0]} >> ${log_file}" Enter || { - echo "${COLRED}Error${COLRESET} sending return status command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" - exit 1 - } - slice_counter=$((slice_counter + 1)) - done - check_progress + local mode=$1 + local total_blocks=0 + local starting_block=0 + local inspect_command + local slice_counter=0 + case "$mode" in + nakamoto) + ## nakamoto blocks + echo "Mode: ${COLYELLOW}${mode}${COLRESET}" + local log_append="_${mode}" + inspect_command="replay-naka-block" + ## get the total number of nakamoto blocks in db + total_blocks=$(echo "select count(*) from nakamoto_block_headers" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) + starting_block=0 # for the block counter, start at this block + ## use these values if `--testing` arg is provided (only replay 1_000 blocks) + ${TESTING} && total_blocks=301883 + ${TESTING} && starting_block=300883 + ;; + *) + ## pre-nakamoto blocks + echo "Mode: ${COLYELLOW}pre-nakamoto${COLRESET}" + local log_append="" + inspect_command="replay-block" + ## get the total number of blocks (excluding orphans) in db + total_blocks=$(echo "select count(*) from staging_blocks where orphaned = 0" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) + starting_block=0 # for the block counter, start at this block + ## use these values if `--testing` arg is provided (only replay 1_000 blocks) Note: 2.5 epoch is at 153106 + ${TESTING} && total_blocks=153000 + ${TESTING} && starting_block=15200 + ;; + esac + local block_diff=$((total_blocks - starting_block)) ## how many blocks are being replayed + local slices=$((CORES - RESERVED)) ## how many replay slices to use + local slice_blocks=$((block_diff / slices)) ## how many blocks to replay per slice + ${TESTING} && echo "${COLRED}Testing: ${TESTING}${COLRESET}" + echo "Total blocks: ${COLYELLOW}${total_blocks}${COLRESET}" + echo "Starting Block: ${COLYELLOW}$starting_block${COLRESET}" + echo "Block diff: 
${COLYELLOW}$block_diff${COLRESET}" + echo "******************************************************" + echo "Total slices: ${COLYELLOW}${slices}${COLRESET}" + echo "Blocks per slice: ${COLYELLOW}${slice_blocks}${COLRESET}" + local end_block_count=$starting_block + while [[ ${end_block_count} -lt ${total_blocks} ]]; do + local start_block_count=$end_block_count + end_block_count=$((end_block_count + slice_blocks)) + if [[ "${end_block_count}" -gt "${total_blocks}" ]] || [[ "${slice_counter}" -eq $((slices - 1)) ]]; then + end_block_count="${total_blocks}" + fi + if [ "${mode}" != "nakamoto" ]; then ## don't create the tmux windows if we're replaying nakamoto blocks (they should already exist). TODO: check if it does exist in case the function call order changes + if [ "${slice_counter}" -gt 0 ];then + tmux new-window -t replay -d -n "slice${slice_counter}" || { + echo "${COLRED}Error${COLRESET} creating tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" + exit 1 + } + fi + fi + local log_file="${LOG_DIR}/slice${slice_counter}${log_append}.log" + local log=" | tee -a ${log_file}" + local cmd="${REPO_DIR}/target/release/stacks-inspect --config ${REPO_DIR}/stackslib/conf/${NETWORK}-follower-conf.toml ${inspect_command} ${SLICE_DIR}${slice_counter} index-range $start_block_count $end_block_count 2>/dev/null" + echo " Creating tmux window: ${COLGREEN}replay:slice${slice_counter}${COLRESET} :: Blocks: ${COLYELLOW}${start_block_count}-${end_block_count}${COLRESET} || Logging to: ${log_file}" + echo "Command: ${cmd}" > "${log_file}" ## log the command being run for the slice + echo "Replaying indexed blocks: ${start_block_count}-${end_block_count} (out of ${total_blocks})" >> "${log_file}" + ## send `cmd` to the tmux window where the replay will run + tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "${cmd}${log}" Enter || { + echo "${COLRED}Error${COLRESET} sending replay command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" + exit 1 + } + ## log the return code as the last line + tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "echo \${PIPESTATUS[0]} >> ${log_file}" Enter || { + echo "${COLRED}Error${COLRESET} sending return status command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" + exit 1 + } + slice_counter=$((slice_counter + 1)) + done + check_progress } ## pretty print the status output (simple spinner while pids are active) check_progress() { - # give the pids a few seconds to show up in process table before checking if they're running - local sleep_duration=5 - local progress=1 - local sp="/-\|" - local count - while [ $sleep_duration -gt 0 ]; do - ${TERM_OUT} && printf "Sleeping ... \b [ %s%s%s ] \033[0K\r" "${COLYELLOW}" "${sleep_duration}" "${COLRESET}" - sleep_duration=$((sleep_duration-1)) - sleep 1 - done - echo "************************************************************************" - echo "Checking Block Replay status" - echo -e ' ' - while true; do - count=$(pgrep -c "stacks-inspect") - if [ "${count}" -gt 0 ]; then - ${TERM_OUT} && printf "Block replay processes are currently active [ %s%s%s%s ] ... 
\b${sp:progress++%${#sp}:1} \033[0K\r" "${COLYELLOW}" "${COLBOLD}" "${count}" "${COLRESET}" - else - ${TERM_OUT} && printf "\r\n" - break - fi - done - echo "************************************************************************" + # give the pids a few seconds to show up in process table before checking if they're running + local sleep_duration=5 + local progress=1 + local sp="/-\|" + local count + while [ $sleep_duration -gt 0 ]; do + ${TERM_OUT} && printf "Sleeping ... \b [ %s%s%s ] \033[0K\r" "${COLYELLOW}" "${sleep_duration}" "${COLRESET}" + sleep_duration=$((sleep_duration-1)) + sleep 1 + done + echo "************************************************************************" + echo "Checking Block Replay status" + echo -e ' ' + while true; do + count=$(pgrep -c "stacks-inspect") + if [ "${count}" -gt 0 ]; then + ${TERM_OUT} && printf "Block replay processes are currently active [ %s%s%s%s ] ... \b${sp:progress++%${#sp}:1} \033[0K\r" "${COLYELLOW}" "${COLBOLD}" "${count}" "${COLRESET}" + else + ${TERM_OUT} && printf "\r\n" + break + fi + done + echo "************************************************************************" } ## store the results in an aggregated logfile and an html file store_results() { - ## text file to store results - local results="${LOG_DIR}/results.log" - ## html file to store results - local results_html="${LOG_DIR}/results.html" - local failed=0; - local return_code=0; - local failure_count - echo "Results: ${COLYELLOW}${results}${COLRESET}" - cd "${LOG_DIR}" || { - echo "${COLRED}Error${COLRESET} Logdir ${COLYELLOW}${LOG_DIR}${COLRESET} doesn't exist" - exit 1 - } - ## retrieve the count of all lines with `Failed processing block` - failure_count=$(grep -rc "Failed processing block" slice*.log | awk -F: '$NF >= 0 {x+=$NF; $NF=""} END{print x}') - if [ "${failure_count}" -gt 0 ]; then - echo "Failures: ${COLRED}${failure_count}${COLRESET}" - else - echo "Failures: ${COLGREEN}${failure_count}${COLRESET}" - fi - echo "Failures: ${failure_count}" > "${results}" - ## check the return codes to see if we had a panic - for file in $(find . -name "slice*.log" -printf '%P\n' | sort); do - # for file in $(ls slice*.log | sort); do - echo "Checking file: ${COLYELLOW}$file${COLRESET}" - return_code=$(tail -1 "${file}") - case ${return_code} in - 0) - # block replay ran successfully - ;; - 1) - # block replay had some block failures - failed=1 - ;; - *) - # return code likely indicates a panic - failed=1 - echo "$file return code: $return_code" >> "${results}" # ok to continue if this write fails - ;; - esac - done + ## text file to store results + local results="${LOG_DIR}/results.log" + ## html file to store results + local results_html="${LOG_DIR}/results.html" + local failed=0; + local return_code=0; + local failure_count + echo "Results: ${COLYELLOW}${results}${COLRESET}" + cd "${LOG_DIR}" || { + echo "${COLRED}Error${COLRESET} Logdir ${COLYELLOW}${LOG_DIR}${COLRESET} doesn't exist" + exit 1 + } + ## retrieve the count of all lines with `Failed processing block` + failure_count=$(grep -rc "Failed processing block" slice*.log | awk -F: '$NF >= 0 {x+=$NF; $NF=""} END{print x}') + if [ "${failure_count}" -gt 0 ]; then + echo "Failures: ${COLRED}${failure_count}${COLRESET}" + else + echo "Failures: ${COLGREEN}${failure_count}${COLRESET}" + fi + echo "Failures: ${failure_count}" > "${results}" + ## check the return codes to see if we had a panic + for file in $(find . 
-name "slice*.log" -printf '%P\n' | sort); do + # for file in $(ls slice*.log | sort); do + echo "Checking file: ${COLYELLOW}$file${COLRESET}" + return_code=$(tail -1 "${file}") + case ${return_code} in + 0) + # block replay ran successfully + ;; + 1) + # block replay had some block failures + failed=1 + ;; + *) + # return code likely indicates a panic + failed=1 + echo "$file return code: $return_code" >> "${results}" # ok to continue if this write fails + ;; + esac + done - ## Store the results as HTML: - cat <<- _EOF_ > "${results_html}" - - -

$(date -u)

-
-

Failures: ${failure_count}

-
- _EOF_ + ## Store the results as HTML: + cat <<- _EOF_ > "${results_html}" + + +

$(date -u)

+
+

Failures: ${failure_count}

+
+_EOF_ - ## use the $failed var here in case there is a panic, then $failure_count may show zero, but the replay was not successful - if [ ${failed} == "1" ];then - output=$(grep -r -h "Failed processing block" slice*.log) - IFS=$'\n' - for line in ${output}; do - echo "
${line}
" >> "${results_html}" || { - echo "${COLRED}Error${COLRESET} writing failure to: ${results_html}" - } - echo "${line}" >> "${results}" || { - echo "${COLRED}Error${COLRESET} writing failure to: ${results}" - } - done - else - echo "
Test Passed
" >> "${results_html}" - fi - echo "
" >> "${results_html}" - echo "" >> "${results_html}" + ## use the $failed var here in case there is a panic, then $failure_count may show zero, but the replay was not successful + if [ ${failed} == "1" ];then + output=$(grep -r -h "Failed processing block" slice*.log) + IFS=$'\n' + for line in ${output}; do + echo "
${line}
" >> "${results_html}" || { + echo "${COLRED}Error${COLRESET} writing failure to: ${results_html}" + } + echo "${line}" >> "${results}" || { + echo "${COLRED}Error${COLRESET} writing failure to: ${results}" + } + done + else + echo "
Test Passed
" >> "${results_html}" + fi + echo "
" >> "${results_html}" + echo "" >> "${results_html}" } ## show usage and exit usage() { - echo - echo "Usage:" - echo " ${COLBOLD}${0}${COLRESET}" - echo " ${COLYELLOW}--testing${COLRESET}: only check a small number of blocks" - echo " ${COLYELLOW}-t|--terminal${COLRESET}: more terminal friendly output" - echo " ${COLYELLOW}-n|--network${COLRESET}: run block replay against specific network (default: mainnet)" - echo " ${COLYELLOW}-b|--branch${COLRESET}: branch of stacks-core to build stacks-inspect from (default: develop)" - echo " ${COLYELLOW}-r|--reserved${COLRESET}: how many cpu cores to reserve for system tasks" - echo - echo " ex: ${COLCYAN}${0} -t -u ${COLRESET}" - echo - exit 0 + echo + echo "Usage:" + echo " ${COLBOLD}${0}${COLRESET}" + echo " ${COLYELLOW}--testing${COLRESET}: only check a small number of blocks" + echo " ${COLYELLOW}-t|--terminal${COLRESET}: more terminal friendly output" + echo " ${COLYELLOW}-n|--network${COLRESET}: run block replay against specific network (default: mainnet)" + echo " ${COLYELLOW}-b|--branch${COLRESET}: branch of stacks-core to build stacks-inspect from (default: develop)" + echo " ${COLYELLOW}-c|--chainstate${COLRESET}: local chainstate copy to use instead of downloading a chainstaet snapshot" + echo " ${COLYELLOW}-l|--logdir${COLRESET}: use existing log directory" + echo " ${COLYELLOW}-r|--reserved${COLRESET}: how many cpu cores to reserve for system tasks" + echo + echo " ex: ${COLCYAN}${0} -t -u ${COLRESET}" + echo + exit 0 } ## install missing dependencies -for cmd in curl tmux git wget tar gzip grep cargo pgrep; do - command -v "${cmd}" >/dev/null 2>&1 || { - case "${cmd}" in - "cargo") - install_cargo - ;; - "pgrep") - package="procps" - ;; - *) - package="${cmd}" - ;; - esac - (sudo apt-get update && sudo apt-get install "${package}") || { - echo "${COLRED}Error${COLRESET} installing $package" - exit 1 - } - } +HAS_APT=1 +HAS_SUDO=1 +for cmd in apt-get sudo curl tmux git wget tar gzip grep cargo pgrep tput find; do + # in Alpine, `find` might be linked to `busybox` and won't work + if [ "${cmd}" == "find" ] && [ -L "${cmd}" ]; then + rp= + rp="$(readlink "$(command -v "${cmd}" || echo "NOTLINK")")" + if [ "${rp}" == "/bin/busybox" ]; then + echo "${COLRED}ERROR${COLRESET} Busybox 'find' is not supported. Please install 'findutils' or similar." 
+ exit 1 + fi + fi + + command -v "${cmd}" >/dev/null 2>&1 || { + case "${cmd}" in + "apt-get") + echo "${COLYELLOW}WARN${COLRESET} 'apt-get' not found; automatic package installation will fail" + HAS_APT=0 + continue + ;; + "sudo") + echo "${COLYELLOW}WARN${COLRESET} 'sudo' not found; automatic package installation will fail" + HAS_SUDO=0 + continue + ;; + "cargo") + install_cargo + ;; + "pgrep") + package="procps" + ;; + *) + package="${cmd}" + ;; + esac + + if [[ ${HAS_APT} = 0 ]] || [[ ${HAS_SUDO} = 0 ]]; then + echo "${COLRED}Error${COLRESET} Missing command '${cmd}'" + exit 1 + fi + (sudo apt-get update && sudo apt-get install "${package}") || { + echo "${COLRED}Error${COLRESET} installing $package" + exit 1 + } + } done ## parse cmd-line args while [ ${#} -gt 0 ]; do - case ${1} in - --testing) - # only replay 1_000 blocks - TESTING=true - ;; - -t|--terminal) - # update terminal with progress (it's just printf to show in real-time that the replays are running) - TERM_OUT=true - ;; - -n|--network) - # required if not mainnet - if [ "${2}" == "" ]; then - echo "Missing required value for ${1}" - fi - NETWORK=${2} - shift - ;; - -b|--branch) - # build from specific branch - if [ "${2}" == "" ]; then - echo "Missing required value for ${1}" - fi - BRANCH=${2} - shift - ;; - -r|--RESERVED) - # reserve this many cpus for the system (default is 10) - if [ "${2}" == "" ]; then - echo "Missing required value for ${1}" - fi - if ! [[ "$2" =~ ^[0-9]+$ ]]; then - echo "ERROR: arg ($2) is not a number." >&2 - exit 1 - fi - RESERVED=${2} - shift - ;; - -h|--help|--usage) - # show usage/options and exit - usage - ;; - esac - shift + case ${1} in + --testing) + # only replay 1_000 blocks + TESTING=true + ;; + -t|--terminal) + # update terminal with progress (it's just printf to show in real-time that the replays are running) + TERM_OUT=true + ;; + -n|--network) + # required if not mainnet + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" + exit 1 + fi + NETWORK=${2} + shift + ;; + -b|--branch) + # build from specific branch + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" + exit 1 + fi + BRANCH=${2} + shift + ;; + -c|--chainstate) + # use a local chainstate + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" + exit 1 + fi + LOCAL_CHAINSTATE="${2}" + shift + ;; + -l|--logdir) + # use a given logdir + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" + exit 1 + fi + LOG_DIR="${2}" + shift + ;; + -r|--RESERVED) + # reserve this many cpus for the system (default is 8) + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" + fi + if ! [[ "$2" =~ ^[0-9]+$ ]]; then + echo "ERROR: arg ($2) is not a number." 
>&2 + exit 1 + fi + RESERVED=${2} + shift + ;; + -h|--help|--usage) + # show usage/options and exit + usage + ;; + esac + shift done ## clear display before starting tput reset echo "Replay Started: ${COLYELLOW}$(date)${COLRESET}" -build_stacks_inspect ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) -configure_replay_slices ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) +build_stacks_inspect ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) +configure_replay_slices ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) setup_replay ## configure logdir and tmux sessions start_replay ## replay pre-nakamoto blocks (2.x) start_replay nakamoto ## replay nakamoto blocks diff --git a/docs/rpc/entities/contracts/read-only-function-args.schema.json b/docs/rpc/entities/contracts/read-only-function-args.schema.json index ff457fe0319..7d497337561 100644 --- a/docs/rpc/entities/contracts/read-only-function-args.schema.json +++ b/docs/rpc/entities/contracts/read-only-function-args.schema.json @@ -1,7 +1,7 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "title": "ReadOnlyFunctionArgs", - "description": "Describes representation of a Type-0 Stacks 2.0 transaction. https://github.com/blockstack/stacks-blockchain/blob/master/sip/sip-005-blocks-and-transactions.md#type-0-transferring-an-asset", + "description": "Describes representation of a Type-0 Stacks 2.0 transaction. https://github.com/stacksgov/sips/blob/main/sips/sip-005/sip-005-blocks-and-transactions.md#type-0-transferring-an-asset", "type": "object", "required": ["sender", "arguments"], "properties": { diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 0c1bcf63a3c..ad0583a0576 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -29,9 +29,11 @@ use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; +use blockstack_lib::net::api::{prefix_hex, prefix_opt_hex}; use blockstack_lib::net::stackerdb::MINER_SLOT_COUNT; use blockstack_lib::util_lib::boot::boot_code_id; use blockstack_lib::version_string; +use clarity::types::chainstate::StacksBlockId; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::QualifiedContractIdentifier; use serde::{Deserialize, Serialize}; @@ -202,13 +204,21 @@ pub enum SignerEvent { burn_height: u64, /// the burn hash for the newly processed burn block burn_header_hash: BurnchainHeaderHash, + /// the consensus hash for the newly processed burn block + consensus_hash: ConsensusHash, /// the time at which this event was received by the signer's event processor received_time: SystemTime, }, /// A new processed Stacks block was received from the node with the given block hash NewBlock { - /// The block header hash for the newly processed stacks block - block_hash: Sha512Trunc256Sum, + /// The stacks block ID (or index block hash) of the new block + block_id: StacksBlockId, + /// The consensus hash of the block (either the tenure it was produced during for Stacks 3.0 + /// or the burn block that won the sortition in Stacks 2.0) + consensus_hash: ConsensusHash, + /// The signer sighash for the newly processed stacks block. 
If the newly processed block is a 2.0 + block, there is *no* signer sighash + signer_sighash: Option<Sha512Trunc256Sum>, /// The block height for the newly processed stacks block block_height: u64, },
@@ -551,37 +561,39 @@ impl TryFrom for SignerEvent { #[derive(Debug, Deserialize)] struct BurnBlockEvent { - burn_block_hash: String, + #[serde(with = "prefix_hex")] + burn_block_hash: BurnchainHeaderHash, burn_block_height: u64, reward_recipients: Vec<serde_json::Value>, reward_slot_holders: Vec<String>, burn_amount: u64, + #[serde(with = "prefix_hex")] + consensus_hash: ConsensusHash, } impl<T: SignerEventTrait> TryFrom<BurnBlockEvent> for SignerEvent<T> { type Error = EventError; fn try_from(burn_block_event: BurnBlockEvent) -> Result<Self, Self::Error> { - let burn_header_hash = burn_block_event - .burn_block_hash - .get(2..) - .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - BurnchainHeaderHash::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; - Ok(SignerEvent::NewBurnBlock { burn_height: burn_block_event.burn_block_height, received_time: SystemTime::now(), - burn_header_hash, + burn_header_hash: burn_block_event.burn_block_hash, + consensus_hash: burn_block_event.consensus_hash, }) } } #[derive(Debug, Deserialize)] struct BlockEvent { - block_hash: String, + #[serde(with = "prefix_hex")] + index_block_hash: StacksBlockId, + #[serde(with = "prefix_opt_hex")] + signer_signature_hash: Option<Sha512Trunc256Sum>, + #[serde(with = "prefix_hex")] + consensus_hash: ConsensusHash, + #[serde(with = "prefix_hex")] + block_hash: BlockHeaderHash, block_height: u64, }
@@ -589,16 +601,10 @@ impl<T: SignerEventTrait> TryFrom<BlockEvent> for SignerEvent<T> { type Error = EventError; fn try_from(block_event: BlockEvent) -> Result<Self, Self::Error> { - let block_hash: Sha512Trunc256Sum = block_event - .block_hash - .get(2..) - .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - Sha512Trunc256Sum::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; Ok(SignerEvent::NewBlock { - block_hash, + signer_sighash: block_event.signer_signature_hash, + block_id: block_event.index_block_hash, + consensus_hash: block_event.consensus_hash, block_height: block_event.block_height, }) }
diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs
index ab3c45c6d21..e09920e573d 100644
--- a/libsigner/src/v0/messages.rs
+++ b/libsigner/src/v0/messages.rs
@@ -25,6 +25,7 @@ use std::fmt::{Debug, Display}; use std::io::{Read, Write}; +use std::marker::PhantomData; use std::net::{SocketAddr, TcpListener, TcpStream}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender;
@@ -51,7 +52,7 @@ use clarity::util::hash::Sha256Sum; use clarity::util::retry::BoundReader; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::serialization::SerializationError; -use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; +use clarity::vm::types::{QualifiedContractIdentifier, ResponseData, TupleData}; use clarity::vm::Value; use hashbrown::{HashMap, HashSet}; use serde::{Deserialize, Serialize};
@@ -61,7 +62,8 @@ use stacks_common::codec::{ StacksMessageCodec, }; use stacks_common::consts::SIGNER_SLOTS_PER_USER; -use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use tiny_http::{ Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, };
@@ -76,12 +78,17 @@ use crate::{ /// Maximum size of the 
[BlockResponseData] serialized bytes pub const BLOCK_RESPONSE_DATA_MAX_SIZE: u32 = 2 * 1024 * 1024; // 2MB +/// Maximum size of the state machine update messages +pub const STATE_MACHINE_UPDATE_MAX_SIZE: u32 = 2 * 1024 * 1024; // 2MB + define_u8_enum!( /// Enum representing the stackerdb message identifier: this is /// the contract index in the signers contracts (i.e., X in signers-0-X) MessageSlotID { /// Block Response message from signers - BlockResponse = 1 + BlockResponse = 1, + /// Signer State Machine Update + StateMachineUpdate = 2 }); define_u8_enum!( @@ -122,7 +129,9 @@ SignerMessageTypePrefix { /// Mock block signature message from Epoch 2.5 signers MockSignature = 4, /// Mock block message from Epoch 2.5 miners - MockBlock = 5 + MockBlock = 5, + /// State machine update + StateMachineUpdate = 6 }); #[cfg_attr(test, mutants::skip)] @@ -168,6 +177,7 @@ impl From<&SignerMessage> for SignerMessageTypePrefix { SignerMessage::MockProposal(_) => SignerMessageTypePrefix::MockProposal, SignerMessage::MockSignature(_) => SignerMessageTypePrefix::MockSignature, SignerMessage::MockBlock(_) => SignerMessageTypePrefix::MockBlock, + SignerMessage::StateMachineUpdate(_) => SignerMessageTypePrefix::StateMachineUpdate, } } } @@ -187,6 +197,8 @@ pub enum SignerMessage { MockProposal(MockProposal), /// A mock block from the epoch 2.5 miners MockBlock(MockBlock), + /// A state machine update + StateMachineUpdate(StateMachineUpdate), } impl SignerMessage { @@ -201,6 +213,7 @@ impl SignerMessage { | Self::MockProposal(_) | Self::MockBlock(_) => None, Self::BlockResponse(_) | Self::MockSignature(_) => Some(MessageSlotID::BlockResponse), // Mock signature uses the same slot as block response since its exclusively for epoch 2.5 testing + Self::StateMachineUpdate(_) => Some(MessageSlotID::StateMachineUpdate), } } } @@ -217,6 +230,9 @@ impl StacksMessageCodec for SignerMessage { SignerMessage::MockSignature(signature) => signature.consensus_serialize(fd), SignerMessage::MockProposal(message) => message.consensus_serialize(fd), SignerMessage::MockBlock(block) => block.consensus_serialize(fd), + SignerMessage::StateMachineUpdate(state_machine_update) => { + state_machine_update.consensus_serialize(fd) + } }?; Ok(()) } @@ -250,6 +266,10 @@ impl StacksMessageCodec for SignerMessage { let block = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::MockBlock(block) } + SignerMessageTypePrefix::StateMachineUpdate => { + let state_machine_update = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::StateMachineUpdate(state_machine_update) + } }; Ok(message) } @@ -274,7 +294,7 @@ pub struct PeerInfo { pub stacks_tip: BlockHeaderHash, /// The stacks tip height pub stacks_tip_height: u64, - /// The pox consensus + /// The consensus hash of the current burnchain tip pub pox_consensus: ConsensusHash, /// The server version pub server_version: String, @@ -525,6 +545,217 @@ impl StacksMessageCodec for MockBlock { } } +/// Message for updates to the Signer State machine +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct StateMachineUpdate { + /// The active signing protocol version + pub active_signer_protocol_version: u64, + /// The highest supported signing protocol by the local signer + pub local_supported_signer_protocol_version: u64, + /// The actual content of the state machine update message (this is a versioned enum) + pub content: StateMachineUpdateContent, + // Prevent manual construction of this struct + no_manual_construct: PhantomData<()>, +} + +/// Versioning 
enum for StateMachineUpdate messages +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub enum StateMachineUpdateContent { + /// Version 0 + V0 { + /// The tip burn block (i.e., the latest bitcoin block) seen by this signer + burn_block: ConsensusHash, + /// The tip burn block height (i.e., the latest bitcoin block) seen by this signer + burn_block_height: u64, + /// The signer's view of who the current miner should be (and their tenure building info) + current_miner: StateMachineUpdateMinerState, + }, +} + +/// The miner state reported in a Signer State machine update +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub enum StateMachineUpdateMinerState { + /// There is an active miner + ActiveMiner { + /// The pubkeyhash of the current miner's signing key + current_miner_pkh: Hash160, + /// The tenure ID of the current miner's active tenure + tenure_id: ConsensusHash, + /// The tenure that the current miner is building on top of + parent_tenure_id: ConsensusHash, + /// The last block of the parent tenure (which should be + /// the block that the next tenure starts from) + parent_tenure_last_block: StacksBlockId, + /// The height of the last block of the parent tenure (which should be + /// the block that the next tenure starts from) + parent_tenure_last_block_height: u64, + }, + /// The signer doesn't believe there's any valid miner + NoValidMiner, +} + +impl StateMachineUpdate { + /// Construct a StateMachineUpdate message, checking to ensure that the + /// supplied content is supported by the supplied protocol versions. + pub fn new( + active_signer_protocol_version: u64, + local_supported_signer_protocol_version: u64, + content: StateMachineUpdateContent, + ) -> Result<Self, CodecError> { + if !content.is_protocol_version_compatible(active_signer_protocol_version) { + return Err(CodecError::DeserializeError(format!("StateMachineUpdateContent is incompatible with protocol version: {active_signer_protocol_version}"))); + } + Ok(Self { + active_signer_protocol_version, + local_supported_signer_protocol_version, + content, + no_manual_construct: PhantomData, + }) + } +} + +impl StateMachineUpdateMinerState { + fn get_variant_id(&self) -> u8 { + match self { + StateMachineUpdateMinerState::NoValidMiner => 0, + StateMachineUpdateMinerState::ActiveMiner { .. } => 1, + } + } +} +
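Since `no_manual_construct` is a private `PhantomData<()>` field, code outside this module cannot build a `StateMachineUpdate` with struct-literal syntax; construction is forced through `new()`, which rejects a content/version mismatch up front. A hedged usage sketch (it mirrors the `version_check_state_machine_update` test added further down):

```rust
// Usage sketch; paths assume the libsigner crate layout shown above.
use libsigner::v0::messages::{
    StateMachineUpdate, StateMachineUpdateContent, StateMachineUpdateMinerState,
};
use stacks_common::types::chainstate::ConsensusHash;

fn main() {
    // V0 content is only valid for active protocol version 0, so this
    // mismatched pairing is rejected at construction time instead of
    // producing an undecodable message on the wire.
    let mismatched = StateMachineUpdate::new(
        1, // active_signer_protocol_version
        1, // local_supported_signer_protocol_version
        StateMachineUpdateContent::V0 {
            burn_block: ConsensusHash([0x11; 20]),
            burn_block_height: 1,
            current_miner: StateMachineUpdateMinerState::NoValidMiner,
        },
    );
    assert!(mismatched.is_err());
}
```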
+impl StacksMessageCodec for StateMachineUpdateMinerState { + fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> { + self.get_variant_id().consensus_serialize(fd)?; + match self { + StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh, + tenure_id, + parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + } => { + current_miner_pkh.consensus_serialize(fd)?; + tenure_id.consensus_serialize(fd)?; + parent_tenure_id.consensus_serialize(fd)?; + parent_tenure_last_block.consensus_serialize(fd)?; + parent_tenure_last_block_height.consensus_serialize(fd)?; + } + StateMachineUpdateMinerState::NoValidMiner => {} + } + Ok(()) + } + + fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> { + let variant_id: u8 = read_next(fd)?; + match variant_id { + 0 => Ok(StateMachineUpdateMinerState::NoValidMiner), + 1 => { + let current_miner_pkh = read_next(fd)?; + let tenure_id = read_next(fd)?; + let parent_tenure_id = read_next(fd)?; + let parent_tenure_last_block = read_next(fd)?; + let parent_tenure_last_block_height = read_next(fd)?; + Ok(StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh, + tenure_id, + parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + }) + } + other => Err(CodecError::DeserializeError(format!( + "Unexpected miner state variant in StateMachineUpdate: {other}" + ))), + } + } +} + +impl StateMachineUpdateContent { + // Is the protocol version specified one that uses self's content? + fn is_protocol_version_compatible(&self, version: u64) -> bool { + match self { + Self::V0 { .. } => version == 0, + } + } + + fn serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> { + match self { + Self::V0 { + burn_block, + burn_block_height, + current_miner, + } => { + burn_block.consensus_serialize(fd)?; + burn_block_height.consensus_serialize(fd)?; + current_miner.consensus_serialize(fd)?; + } + } + Ok(()) + } + fn deserialize<R: Read>(fd: &mut R, version: u64) -> Result<Self, CodecError> { + match version { + 0 => { + let burn_block = read_next(fd)?; + let burn_block_height = read_next(fd)?; + let current_miner = read_next(fd)?; + Ok(Self::V0 { + burn_block, + burn_block_height, + current_miner, + }) + } + other => Err(CodecError::DeserializeError(format!( + "Unknown state machine update version: {other}" + ))), + } + } +} +
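The `impl StacksMessageCodec for StateMachineUpdate` that follows frames the message as two big-endian `u64` protocol versions, then a `u32` length prefix, then the serialized content; the length prefix is what lets a signer that only speaks older protocol versions skip over content it cannot parse. A standalone, std-only model of that envelope (simplified; the real codec also enforces `STATE_MACHINE_UPDATE_MAX_SIZE` on both the encode and decode paths):

```rust
use std::io::{Read, Result, Write};

// Model of the envelope used below:
// [active_version: u64 BE][local_version: u64 BE][content_len: u32 BE][content]
fn encode(active: u64, local: u64, content: &[u8], out: &mut impl Write) -> Result<()> {
    out.write_all(&active.to_be_bytes())?;
    out.write_all(&local.to_be_bytes())?;
    // The real codec errors out if the length exceeds u32 or the max size.
    out.write_all(&(content.len() as u32).to_be_bytes())?;
    out.write_all(content)
}

fn decode(inp: &mut impl Read) -> Result<(u64, u64, Vec<u8>)> {
    let mut b8 = [0u8; 8];
    inp.read_exact(&mut b8)?;
    let active = u64::from_be_bytes(b8);
    inp.read_exact(&mut b8)?;
    let local = u64::from_be_bytes(b8);
    let mut b4 = [0u8; 4];
    inp.read_exact(&mut b4)?;
    // A version-aware reader uses `active` to pick a content parser, or the
    // length to skip content from an unknown newer version.
    let mut content = vec![0u8; u32::from_be_bytes(b4) as usize];
    inp.read_exact(&mut content)?;
    Ok((active, local, content))
}

fn main() {
    let mut buf = Vec::new();
    encode(0, 3, b"v0-content", &mut buf).unwrap();
    let (active, local, content) = decode(&mut buf.as_slice()).unwrap();
    assert_eq!((active, local), (0, 3));
    assert_eq!(content, b"v0-content");
}
```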
signer messages when usize < u32");
+        let mut buffer = vec![0u8; buffer_len];
+        fd.read_exact(&mut buffer).map_err(CodecError::ReadError)?;
+        let content = StateMachineUpdateContent::deserialize(
+            &mut buffer.as_slice(),
+            active_signer_protocol_version,
+        )?;
+
+        Self::new(
+            active_signer_protocol_version,
+            local_supported_signer_protocol_version,
+            content,
+        )
+    }
+}
+
 define_u8_enum!(
 /// Enum representing the reject code type prefix
 RejectCodeTypePrefix {
@@ -809,14 +1040,14 @@ impl std::fmt::Display for BlockResponse {
             BlockResponse::Accepted(a) => {
                 write!(
                     f,
-                    "BlockAccepted: signer_sighash = {}, signature = {}, version = {}",
+                    "BlockAccepted: signer_signature_hash = {}, signature = {}, version = {}",
                     a.signer_signature_hash, a.signature, a.metadata.server_version
                 )
             }
             BlockResponse::Rejected(r) => {
                 write!(
                     f,
-                    "BlockRejected: signer_sighash = {}, code = {}, reason = {}, signature = {}, version = {}",
+                    "BlockRejected: signer_signature_hash = {}, code = {}, reason = {}, signature = {}, version = {}",
                     r.reason_code, r.reason, r.signer_signature_hash, r.signature, r.metadata.server_version
                 )
             }
@@ -875,11 +1106,11 @@ impl BlockResponse {
         }
     }
 
-    /// The signer signature hash for the block response
-    pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum {
+    /// Get the block response data from the block response
+    pub fn get_response_data(&self) -> &BlockResponseData {
         match self {
-            BlockResponse::Accepted(accepted) => accepted.signer_signature_hash,
-            BlockResponse::Rejected(rejection) => rejection.signer_signature_hash,
+            BlockResponse::Accepted(accepted) => &accepted.response_data,
+            BlockResponse::Rejected(rejection) => &rejection.response_data,
         }
     }
 
@@ -1474,6 +1705,12 @@ impl From for SignerMessage {
     }
 }
 
+impl From<StateMachineUpdate> for SignerMessage {
+    fn from(update: StateMachineUpdate) -> Self {
+        Self::StateMachineUpdate(update)
+    }
+}
+
 #[cfg(test)]
 mod test {
     use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader;
@@ -1981,4 +2218,101 @@ mod test {
             RejectReason::Unknown(RejectReasonPrefix::Unknown as u8)
         );
     }
+
+    #[test]
+    fn version_check_state_machine_update() {
+        let error = StateMachineUpdate::new(
+            1,
+            3,
+            StateMachineUpdateContent::V0 {
+                burn_block: ConsensusHash([0x55; 20]),
+                burn_block_height: 100,
+                current_miner: StateMachineUpdateMinerState::ActiveMiner {
+                    current_miner_pkh: Hash160([0xab; 20]),
+                    tenure_id: ConsensusHash([0x44; 20]),
+                    parent_tenure_id: ConsensusHash([0x22; 20]),
+                    parent_tenure_last_block: StacksBlockId([0x33; 32]),
+                    parent_tenure_last_block_height: 1,
+                },
+            },
+        )
+        .unwrap_err();
+        assert!(matches!(error, CodecError::DeserializeError(_)));
+    }
+
+    #[test]
+    fn deserialize_state_machine_update_v0() {
+        let signer_message = StateMachineUpdate::new(
+            0,
+            3,
+            StateMachineUpdateContent::V0 {
+                burn_block: ConsensusHash([0x55; 20]),
+                burn_block_height: 100,
+                current_miner: StateMachineUpdateMinerState::ActiveMiner {
+                    current_miner_pkh: Hash160([0xab; 20]),
+                    tenure_id: ConsensusHash([0x44; 20]),
+                    parent_tenure_id: ConsensusHash([0x22; 20]),
+                    parent_tenure_last_block: StacksBlockId([0x33; 32]),
+                    parent_tenure_last_block_height: 1,
+                },
+            },
+        )
+        .unwrap();
+
+        let mut bytes = vec![];
+        signer_message.consensus_serialize(&mut bytes).unwrap();
+
+        // Check the raw content to catch regressions when the structure changes
+        let raw_signer_message: Vec<&[u8]> = vec![
+            /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 0],
+            /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 3],
+            /* content_len*/ &[0, 0, 0, 129],
/* burn_block*/ &[0x55; 20], + /* burn_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 100], + /* current_miner_variant */ &[0x01], + /* current_miner_pkh */ &[0xab; 20], + /* tenure_id*/ &[0x44; 20], + /* parent_tenure_id*/ &[0x22; 20], + /* parent_tenure_last_block */ &[0x33; 32], + /* parent_tenure_last_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 1], + ]; + + assert_eq!(bytes, raw_signer_message.concat()); + + let signer_message_deserialized = + StateMachineUpdate::consensus_deserialize(&mut &bytes[..]).unwrap(); + + assert_eq!(signer_message, signer_message_deserialized); + + let signer_message = StateMachineUpdate::new( + 0, + 4, + StateMachineUpdateContent::V0 { + burn_block: ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::NoValidMiner, + }, + ) + .unwrap(); + + let mut bytes = vec![]; + signer_message.consensus_serialize(&mut bytes).unwrap(); + + // check for raw content for avoiding regressions when structure changes + let raw_signer_message: Vec<&[u8]> = vec![ + /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 0], + /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 4], + /* content_len*/ &[0, 0, 0, 29], + /* burn_block*/ &[0x55; 20], + /* burn_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 100], + /* current_miner_variant */ &[0x00], + ]; + + assert_eq!(bytes, raw_signer_message.concat()); + + let signer_message_deserialized = + StateMachineUpdate::consensus_deserialize(&mut &bytes[..]).unwrap(); + + assert_eq!(signer_message, signer_message_deserialized); + } } diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index e298de65f29..010a9dcf162 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -21,9 +21,6 @@ use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, ResponseData, TupleData}; use clarity::vm::Value; #[cfg(any(test, feature = "testing"))] -use slog::slog_debug; -use slog::slog_error; -#[cfg(any(test, feature = "testing"))] use stacks_common::debug; use stacks_common::types::StacksEpochId; use stacks_common::{error, test_debug}; diff --git a/pox-locking/src/events_24.rs b/pox-locking/src/events_24.rs index 3f54794bb75..b827988e948 100644 --- a/pox-locking/src/events_24.rs +++ b/pox-locking/src/events_24.rs @@ -20,9 +20,6 @@ use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, TupleData}; use clarity::vm::Value; #[cfg(any(test, feature = "testing"))] -use slog::slog_debug; -use slog::slog_error; -#[cfg(any(test, feature = "testing"))] use stacks_common::debug; use stacks_common::{error, test_debug}; diff --git a/pox-locking/src/lib.rs b/pox-locking/src/lib.rs index 63380212dcb..fe5ea6b8991 100644 --- a/pox-locking/src/lib.rs +++ b/pox-locking/src/lib.rs @@ -30,7 +30,6 @@ use clarity::vm::contexts::GlobalContext; use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::Value; -use slog::slog_warn; use stacks_common::types::StacksEpochId; use stacks_common::warn; diff --git a/pox-locking/src/pox_1.rs b/pox-locking/src/pox_1.rs index e28ccb917e2..4cc7ffe0ea5 100644 --- a/pox-locking/src/pox_1.rs +++ b/pox-locking/src/pox_1.rs @@ -23,7 +23,6 @@ use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; use clarity::vm::types::PrincipalData; use clarity::vm::Value; -use 
slog::slog_debug; use stacks_common::debug; use crate::LockingError; diff --git a/pox-locking/src/pox_2.rs b/pox-locking/src/pox_2.rs index 47f30faa4db..996a0c52d5a 100644 --- a/pox-locking/src/pox_2.rs +++ b/pox-locking/src/pox_2.rs @@ -23,7 +23,6 @@ use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{Environment, Value}; -use slog::{slog_debug, slog_error}; use stacks_common::{debug, error}; use crate::events::synthesize_pox_event_info; diff --git a/pox-locking/src/pox_3.rs b/pox-locking/src/pox_3.rs index 8c2616b3738..2aab7caf2b3 100644 --- a/pox-locking/src/pox_3.rs +++ b/pox-locking/src/pox_3.rs @@ -23,7 +23,6 @@ use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{Environment, Value}; -use slog::{slog_debug, slog_error}; use stacks_common::{debug, error}; use crate::events::synthesize_pox_event_info; diff --git a/pox-locking/src/pox_4.rs b/pox-locking/src/pox_4.rs index 8eda9a2e897..2853c892124 100644 --- a/pox-locking/src/pox_4.rs +++ b/pox-locking/src/pox_4.rs @@ -23,7 +23,6 @@ use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{Environment, Value}; -use slog::{slog_debug, slog_error}; use stacks_common::{debug, error}; use crate::events::synthesize_pox_event_info; diff --git a/sip/README.md b/sip/README.md deleted file mode 100644 index bce5ef7a907..00000000000 --- a/sip/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Stacks Improvement Proposals (SIPs) - -This directory formerly contained all of the in-progress Stacks Improvement Proposals before the Stacks 2.0 mainnet launched. - -The SIPs are now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-000-stacks-improvement-proposal-process.md b/sip/sip-000-stacks-improvement-proposal-process.md deleted file mode 100644 index 987c9ca6cfb..00000000000 --- a/sip/sip-000-stacks-improvement-proposal-process.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-000 Stacks Improvement Proposal Process - -This document formerly contained SIP-000 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-001-burn-election.md b/sip/sip-001-burn-election.md deleted file mode 100644 index 19a4aca215c..00000000000 --- a/sip/sip-001-burn-election.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-001 Burn Election - -This document formerly contained SIP-001 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-001/sip-001-burn-election.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). 
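
Taken together, the StateMachineUpdate pieces earlier in this diff define a small versioned wire format: two u64 protocol-version fields, a u32 length prefix, then the version-specific payload. As an editorial aside (not part of the patch), here is a minimal round-trip sketch; it assumes the libsigner::v0::messages paths used elsewhere in this diff and the codec impls added above.

// Editorial sketch: round-trip a V0 StateMachineUpdate through its consensus codec.
// Module paths are assumptions based on imports seen elsewhere in this diff.
use libsigner::v0::messages::{
    StateMachineUpdate, StateMachineUpdateContent, StateMachineUpdateMinerState,
};
use stacks_common::codec::StacksMessageCodec;
use stacks_common::types::chainstate::ConsensusHash;

fn roundtrip_v0_update() {
    // `new` rejects content that the active protocol version cannot carry,
    // so a V0 body must be paired with active version 0.
    let update = StateMachineUpdate::new(
        0, // active_signer_protocol_version
        1, // local_supported_signer_protocol_version
        StateMachineUpdateContent::V0 {
            burn_block: ConsensusHash([0x11; 20]),
            burn_block_height: 42,
            current_miner: StateMachineUpdateMinerState::NoValidMiner,
        },
    )
    .expect("version 0 must support V0 content");

    // Serialization writes both version fields, a u32 length prefix,
    // and then the version-tagged content bytes.
    let mut bytes = Vec::new();
    update
        .consensus_serialize(&mut bytes)
        .expect("serialization should succeed");
    let parsed = StateMachineUpdate::consensus_deserialize(&mut bytes.as_slice())
        .expect("deserialization should succeed");
    assert_eq!(update, parsed);
}
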
diff --git a/sip/sip-002-smart-contract-language.md b/sip/sip-002-smart-contract-language.md deleted file mode 100644 index 26503048b5e..00000000000 --- a/sip/sip-002-smart-contract-language.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-002 Smart Contract Language - -This document formerly contained SIP-002 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-002/sip-002-smart-contract-language.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-003-peer-network.md b/sip/sip-003-peer-network.md deleted file mode 100644 index 84ae5dfd25c..00000000000 --- a/sip/sip-003-peer-network.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-003 Peer Network - -This document formerly contained SIP-003 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-003/sip-003-peer-network.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-004-materialized-view.md b/sip/sip-004-materialized-view.md deleted file mode 100644 index def065a1751..00000000000 --- a/sip/sip-004-materialized-view.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-004 Cryptographic Committment to Materialized Views - -This document formerly contained SIP-004 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-004/sip-004-materialized-view.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-005-blocks-and-transactions.md b/sip/sip-005-blocks-and-transactions.md deleted file mode 100644 index eda0f300045..00000000000 --- a/sip/sip-005-blocks-and-transactions.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-005 Blocks, Transactions, and Accounts - -This document formerly contained SIP-005 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-005/sip-005-blocks-and-transactions.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-006-runtime-cost-assessment.md b/sip/sip-006-runtime-cost-assessment.md deleted file mode 100644 index 019e6173f01..00000000000 --- a/sip/sip-006-runtime-cost-assessment.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-006 Clarity Execution Cost Assessment - -This document formerly contained SIP-006 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-006/sip-006-runtime-cost-assessment.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-007-stacking-consensus.md b/sip/sip-007-stacking-consensus.md deleted file mode 100644 index 37afb07230c..00000000000 --- a/sip/sip-007-stacking-consensus.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-007 Stacking Consensus - -This document formerly contained SIP-007 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-007/sip-007-stacking-consensus.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). 
diff --git a/sip/sip-008-analysis-cost-assessment.md b/sip/sip-008-analysis-cost-assessment.md deleted file mode 100644 index 72813cdc085..00000000000 --- a/sip/sip-008-analysis-cost-assessment.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-008 Clarity Parsing and Analysis Cost Assessment - -This document formerly contained SIP-008 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-008/sip-008-analysis-cost-assessment.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 491a6863507..82f034e20cc 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -1,14 +1,25 @@ [package] name = "stacks-common" version = "0.0.1" -authors = [ "Jude Nelson ", - "Aaron Blankstein ", - "Ludo Galabru " ] +authors = [ + "Jude Nelson ", + "Aaron Blankstein ", + "Ludo Galabru ", +] license = "GPLv3" homepage = "https://github.com/blockstack/stacks-blockchain" repository = "https://github.com/blockstack/stacks-blockchain" description = "Common modules for blockstack_lib, libclarity" -keywords = [ "stacks", "stx", "bitcoin", "crypto", "blockstack", "decentralized", "dapps", "blockchain" ] +keywords = [ + "stacks", + "stx", + "bitcoin", + "crypto", + "blockstack", + "decentralized", + "dapps", + "blockchain", +] readme = "README.md" resolver = "2" edition = "2021" @@ -25,7 +36,7 @@ serde_derive = "1" sha3 = "0.10.1" ripemd = "0.1.1" lazy_static = "1.4.0" -slog = { version = "2.5.2", features = [ "max_level_trace" ] } +slog = { version = "2.5.2", features = ["max_level_trace"] } slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } chrono = "0.4.19" @@ -37,7 +48,12 @@ rusqlite = { workspace = true, optional = true } nix = "0.23" [target.'cfg(windows)'.dependencies] -winapi = { version = "0.3", features = ["consoleapi", "handleapi", "synchapi", "winbase"] } +winapi = { version = "0.3", features = [ + "consoleapi", + "handleapi", + "synchapi", + "winbase", +] } [target.'cfg(windows)'.dev-dependencies] winapi = { version = "0.3", features = ["fileapi", "processenv", "winnt"] } @@ -46,30 +62,33 @@ winapi = { version = "0.3", features = ["fileapi", "processenv", "winnt"] } version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] -[dependencies.secp256k1] -version = "0.24.3" -features = ["serde", "recovery"] - [dependencies.ed25519-dalek] workspace = true [dependencies.curve25519-dalek] -version = "=2.0.0" +version = "4.1.3" features = ["serde"] [dependencies.time] version = "0.2.23" features = ["std"] +[target.'cfg(not(target_family = "wasm"))'.dependencies] +secp256k1 = { version = "0.24.3", features = ["serde", "recovery"] } + +[target.'cfg(target_family = "wasm")'.dependencies] +libsecp256k1 = { version = "0.7.0" } + [dev-dependencies] rand_core = { workspace = true } +proptest = "1.6.0" [features] -default = ["canonical", "developer-mode"] -canonical = ["rusqlite"] +default = ["developer-mode"] developer-mode = [] slog_json = ["slog-json"] -testing = ["canonical"] +rusqlite = ["dep:rusqlite"] +testing = [] serde = [] bech32_std = [] bech32_strict = [] diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 065dd5e8141..b4d61397fa4 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::{Deserialize, Serialize}; @@ -106,7 +106,7 @@ impl<'de, const MAX_SIZE: u16> Deserialize<'de> for BitVec { } } -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] impl FromSql for BitVec { fn column_result(value: ValueRef<'_>) -> FromSqlResult { let bytes = hex_bytes(value.as_str()?).map_err(|e| FromSqlError::Other(Box::new(e)))?; @@ -115,7 +115,7 @@ impl FromSql for BitVec { } } -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] impl ToSql for BitVec { fn to_sql(&self) -> rusqlite::Result> { let hex = bytes_to_hex(self.serialize_to_vec().as_slice()); diff --git a/stacks-common/src/deps_common/bitcoin/util/hash.rs b/stacks-common/src/deps_common/bitcoin/util/hash.rs index abfce8349f0..3847e314537 100644 --- a/stacks-common/src/deps_common/bitcoin/util/hash.rs +++ b/stacks-common/src/deps_common/bitcoin/util/hash.rs @@ -50,7 +50,6 @@ impl_array_newtype!(Ripemd160Hash, u8, 20); /// A Bitcoin hash160, 20-bytes, computed from x as RIPEMD160(SHA256(x)) pub struct Hash160([u8; 20]); impl_array_newtype!(Hash160, u8, 20); -impl_byte_array_rusqlite_only!(Hash160); impl Hash160 { /// Convert the Hash160 inner bytes to a non-prefixed hex string diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 3cb4a94facb..80f9ecf3fd4 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -19,7 +19,7 @@ use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; use std::sync::LazyLock; -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] pub mod sqlite; use crate::address::c32::{c32_address, c32_address_decode}; diff --git a/stacks-common/src/types/sqlite.rs b/stacks-common/src/types/sqlite.rs index 183ec61fbc6..57010ea118e 100644 --- a/stacks-common/src/types/sqlite.rs +++ b/stacks-common/src/types/sqlite.rs @@ -16,7 +16,7 @@ use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; -use super::chainstate::VRFSeed; +use super::chainstate::{StacksAddress, VRFSeed}; use crate::deps_common::bitcoin::util::hash::Sha256dHash; use crate::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, TrieHash, @@ -42,6 +42,13 @@ impl ToSql for Sha256dHash { } } +impl rusqlite::types::ToSql for StacksAddress { + fn to_sql(&self) -> rusqlite::Result { + let addr_str = self.to_string(); + Ok(addr_str.into()) + } +} + // Implement rusqlite traits for a bunch of structs that used to be defined // in the chainstate code impl_byte_array_rusqlite_only!(ConsensusHash); diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index 77a4950f818..0fba87a16c8 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -268,7 +268,7 @@ macro_rules! trace { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Trace.is_at_least(cur_level) { - slog_trace!($crate::util::log::LOGGER, $($arg)*) + slog::slog_trace!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -278,7 +278,7 @@ macro_rules! error { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Error.is_at_least(cur_level) { - slog_error!($crate::util::log::LOGGER, $($arg)*) + slog::slog_error!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -288,7 +288,7 @@ macro_rules! 
warn { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Warning.is_at_least(cur_level) { - slog_warn!($crate::util::log::LOGGER, $($arg)*) + slog::slog_warn!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -298,7 +298,7 @@ macro_rules! info { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Info.is_at_least(cur_level) { - slog_info!($crate::util::log::LOGGER, $($arg)*) + slog::slog_info!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -308,7 +308,7 @@ macro_rules! debug { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Debug.is_at_least(cur_level) { - slog_debug!($crate::util::log::LOGGER, $($arg)*) + slog::slog_debug!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -318,7 +318,7 @@ macro_rules! fatal { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Critical.is_at_least(cur_level) { - slog_crit!($crate::util::log::LOGGER, $($arg)*) + slog::slog_crit!($crate::util::log::LOGGER, $($arg)*) } }) } diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs new file mode 100644 index 00000000000..5a7cdc30d00 --- /dev/null +++ b/stacks-common/src/util/lru_cache.rs @@ -0,0 +1,652 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+
+use std::fmt::Display;
+
+use hashbrown::HashMap;
+
+/// Node in the doubly linked list
+struct Node<K, V> {
+    key: K,
+    value: V,
+    dirty: bool,
+    next: usize,
+    prev: usize,
+}
+
+impl<K: Display, V: Display> Display for Node<K, V> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{}={} ({}) [prev={}, next={}]",
+            self.key,
+            self.value,
+            if self.dirty { "dirty" } else { "clean" },
+            self.prev,
+            self.next
+        )
+    }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct LruCacheCorrupted;
+
+impl std::fmt::Display for LruCacheCorrupted {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "LRU cache is in a corrupted state")
+    }
+}
+
+impl std::error::Error for LruCacheCorrupted {}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum FlushError<E> {
+    LruCacheCorrupted,
+    FlushError(E),
+}
+
+impl<E> From<E> for FlushError<E> {
+    fn from(e: E) -> Self {
+        FlushError::FlushError(e)
+    }
+}
+
+/// LRU cache
+pub struct LruCache<K, V> {
+    capacity: usize,
+    /// Map from address to an offset in the linked list
+    cache: HashMap<K, usize>,
+    /// Doubly linked list of values in order of most recently used
+    order: Vec<Node<K, V>>,
+    /// Index of the head of the linked list -- the most recently used element
+    head: usize,
+    /// Index of the tail of the linked list -- the least recently used element
+    tail: usize,
+}
+
+impl<K: Display, V: Display> Display for LruCache<K, V> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        writeln!(
+            f,
+            "LruCache (capacity={}, head={}, tail={})",
+            self.capacity, self.head, self.tail
+        )?;
+        let mut curr = self.head;
+        while curr != self.capacity {
+            let Some(node) = self.order.get(curr) else {
+                writeln!(f, "  <corrupted>")?;
+                break;
+            };
+            writeln!(f, "  {}", node)?;
+            curr = node.next;
+        }
+        Ok(())
+    }
+}
+
+impl<K: Eq + std::hash::Hash + Clone, V: Copy> LruCache<K, V> {
+    /// Create a new LRU cache with the given capacity (> 0)
+    pub fn new(mut capacity: usize) -> Self {
+        if capacity == 0 {
+            error!("Capacity must be greater than 0. Defaulting to 1024.");
+            capacity = 1024;
+        }
+
+        LruCache {
+            capacity,
+            cache: HashMap::new(),
+            order: Vec::with_capacity(capacity),
+            head: capacity,
+            tail: capacity,
+        }
+    }
+
+    /// Get the value for the given key
+    /// Returns an error iff the cache is corrupted and should be discarded
+    pub fn get(&mut self, key: &K) -> Result<Option<V>, LruCacheCorrupted> {
+        if let Some(&index) = self.cache.get(key) {
+            self.move_to_head(index)?;
+            let node = self.order.get(index).ok_or(LruCacheCorrupted)?;
+            Ok(Some(node.value))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Insert a key-value pair into the cache, marking it as dirty.
+    /// Returns an error iff the cache is corrupted and should be discarded
+    /// Returns `Ok(Some((K, V)))` if a dirty value was evicted.
+    pub fn insert(&mut self, key: K, value: V) -> Result<Option<(K, V)>, LruCacheCorrupted> {
+        self.insert_with_dirty(key, value, true)
+    }
+
+    /// Insert a key-value pair into the cache, marking it as clean.
+    /// Returns an error iff the cache is corrupted and should be discarded
+    /// Returns `Ok(Some((K, V)))` if a dirty value was evicted.
+    pub fn insert_clean(&mut self, key: K, value: V) -> Result<Option<(K, V)>, LruCacheCorrupted> {
+        self.insert_with_dirty(key, value, false)
+    }
+
+    /// Insert a key-value pair into the cache
+    /// Returns an error iff the cache is corrupted and should be discarded
+    /// Returns `Ok(Some((K, V)))` if a dirty value was evicted.
+    pub fn insert_with_dirty(
+        &mut self,
+        key: K,
+        value: V,
+        dirty: bool,
+    ) -> Result<Option<(K, V)>, LruCacheCorrupted> {
+        if let Some(&index) = self.cache.get(&key) {
+            // Update an existing node
+            let node = self.order.get_mut(index).ok_or(LruCacheCorrupted)?;
+            node.value = value;
+            node.dirty = dirty;
+            self.move_to_head(index)?;
+            Ok(None)
+        } else {
+            let mut evicted = None;
+            // This is a new key
+            let index = if self.cache.len() == self.capacity {
+                // We've reached capacity. Evict the least-recently used value
+                // and reuse its node
+                let index = self.evict_lru()?;
+                let tail_node = self.order.get_mut(index).ok_or(LruCacheCorrupted)?;
+
+                // Replace the key with the new key, saving the old key
+                let replaced_key = std::mem::replace(&mut tail_node.key, key.clone());
+
+                // Save the evicted key-value pair, if it was dirty
+                if tail_node.dirty {
+                    evicted = Some((replaced_key, tail_node.value));
+                };
+
+                // Update the evicted node with the new key-value pair
+                tail_node.value = value;
+                tail_node.dirty = dirty;
+
+                // Insert the new key-value pair into the cache
+                self.cache.insert(key, index);
+
+                index
+            } else {
+                // Create a new node, add it to the cache
+                let index = self.order.len();
+                let node = Node {
+                    key: key.clone(),
+                    value,
+                    dirty,
+                    next: self.capacity,
+                    prev: self.capacity,
+                };
+                self.order.push(node);
+                self.cache.insert(key, index);
+                index
+            };
+
+            // Put the new or reused node at the head of the LRU list
+            self.attach_as_head(index)?;
+
+            Ok(evicted)
+        }
+    }
+
+    /// Flush all dirty values in the cache, calling the given function, `f`,
+    /// for each dirty value.
+    /// Outer result is an error iff the cache is corrupted and should be discarded.
+    /// Inner result is an error iff the function, `f`, returns an error.
+    pub fn flush<E>(
+        &mut self,
+        mut f: impl FnMut(&K, V) -> Result<(), E>,
+    ) -> Result<(), FlushError<E>> {
+        for node in self.order.iter_mut().filter(|n| n.dirty) {
+            f(&node.key, node.value)?;
+            node.dirty = false;
+        }
+        Ok(())
+    }
+
+    /// Helper function to remove a node from the linked list (by index)
+    fn detach_node(&mut self, index: usize) -> Result<(), LruCacheCorrupted> {
+        let node = self.order.get(index).ok_or(LruCacheCorrupted)?;
+        let prev = node.prev;
+        let next = node.next;
+
+        if index == self.tail {
+            // If this is the last node, update the tail to point to its previous node
+            self.tail = prev;
+        } else {
+            // Else, update the next node to point to the previous node
+            let next_node = self.order.get_mut(next).ok_or(LruCacheCorrupted)?;
+            next_node.prev = prev;
+        }
+
+        if index == self.head {
+            // If this is the first node, update the head to point to the next node
+            self.head = next;
+        } else {
+            // Else, update the previous node to point to the next node
+            let prev_node = self.order.get_mut(prev).ok_or(LruCacheCorrupted)?;
+            prev_node.next = next;
+        }
+
+        Ok(())
+    }
+
+    /// Helper function to attach a node as the head of the linked list
+    fn attach_as_head(&mut self, index: usize) -> Result<(), LruCacheCorrupted> {
+        let node = self.order.get_mut(index).ok_or(LruCacheCorrupted)?;
+        node.prev = self.capacity;
+        node.next = self.head;
+
+        if self.head != self.capacity {
+            // If there is a head, update its previous pointer to this one
+            let head_node = self.order.get_mut(self.head).ok_or(LruCacheCorrupted)?;
+            head_node.prev = index;
+        } else {
+            // Else, the list was empty, so update the tail
+            self.tail = index;
+        }
+        self.head = index;
+        Ok(())
+    }
+
+    /// Helper function to move a node to the head of the linked list
+    fn move_to_head(&mut self, index: usize) -> Result<(), LruCacheCorrupted> {
+        if index == self.head {
+            // If the node is already the head, do nothing
+            return Ok(());
+        }
+
+        self.detach_node(index)?;
+        self.attach_as_head(index)
+    }
+
+    /// Helper function to evict the least-recently used node, which is the
+    /// tail of the linked list
+    /// Returns the index of the evicted node
+    fn evict_lru(&mut self) -> Result<usize, LruCacheCorrupted> {
+        let index = self.tail;
+        if index == self.capacity {
+            // If the list is empty, do nothing
+            return Ok(self.capacity);
+        }
+        self.detach_node(index)?;
+        let node = self.order.get(index).ok_or(LruCacheCorrupted)?;
+        self.cache.remove(&node.key);
+        Ok(index)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_lru_cache() {
+        let mut cache = LruCache::new(2);
+
+        cache.insert(1, 1).expect("cache corrupted");
+        cache.insert(2, 2).expect("cache corrupted");
+        assert_eq!(cache.get(&1).expect("cache corrupted"), Some(1));
+        cache.insert(3, 3).expect("cache corrupted");
+        assert_eq!(cache.get(&2).expect("cache corrupted"), None);
+        cache.insert(4, 4).expect("cache corrupted");
+        assert_eq!(cache.get(&1).expect("cache corrupted"), None);
+        assert_eq!(cache.get(&3).expect("cache corrupted"), Some(3));
+        assert_eq!(cache.get(&4).expect("cache corrupted"), Some(4));
+    }
+
+    #[test]
+    fn test_lru_cache_update() {
+        let mut cache = LruCache::new(2);
+
+        cache.insert(1, 1).expect("cache corrupted");
+        cache.insert(2, 2).expect("cache corrupted");
+        cache.insert(1, 10).expect("cache corrupted");
+        assert_eq!(cache.get(&1).expect("cache corrupted"), Some(10));
+        cache.insert(3, 3).expect("cache corrupted");
+        assert_eq!(cache.get(&2).expect("cache corrupted"), None);
+        cache.insert(2, 4).expect("cache corrupted");
+        assert_eq!(cache.get(&2).expect("cache corrupted"), Some(4));
+        assert_eq!(cache.get(&3).expect("cache corrupted"), Some(3));
+    }
+
+    #[test]
+    fn test_lru_cache_evicted() {
+        let mut cache = LruCache::new(2);
+
+        assert!(cache.insert(1, 1).expect("cache corrupted").is_none());
+        assert!(cache.insert(2, 2).expect("cache corrupted").is_none());
+        let evicted = cache
+            .insert(3, 3)
+            .expect("cache corrupted")
+            .expect("expected an eviction");
+        assert_eq!(evicted, (1, 1));
+    }
+
+    #[test]
+    fn test_lru_cache_flush() {
+        let mut cache = LruCache::new(2);
+
+        cache.insert(1, 1).expect("cache corrupted");
+
+        let mut flushed = Vec::new();
+        cache
+            .flush(|k, v| {
+                flushed.push((*k, v));
+                Ok::<(), ()>(())
+            })
+            .expect("cache corrupted or flush failed");
+
+        assert_eq!(flushed, vec![(1, 1)]);
+
+        cache.insert(1, 3).expect("cache corrupted");
+        cache.insert(2, 2).expect("cache corrupted");
+
+        let mut flushed = Vec::new();
+        cache
+            .flush(|k, v| {
+                flushed.push((*k, v));
+                Ok::<(), ()>(())
+            })
+            .expect("cache corrupted or flush failed");
+
+        flushed.sort();
+        assert_eq!(flushed, vec![(1, 3), (2, 2)]);
+    }
+
+    #[test]
+    fn test_lru_cache_evict_clean() {
+        let mut cache = LruCache::new(2);
+
+        assert!(cache
+            .insert_with_dirty(0, 0, false)
+            .expect("cache corrupted")
+            .is_none());
+        assert!(cache
+            .insert_with_dirty(1, 1, false)
+            .expect("cache corrupted")
+            .is_none());
+        assert!(cache
+            .insert_with_dirty(2, 2, true)
+            .expect("cache corrupted")
+            .is_none());
+        assert!(cache
+            .insert_with_dirty(3, 3, true)
+            .expect("cache corrupted")
+            .is_none());
+
+        let mut flushed = Vec::new();
+        cache
+            .flush(|k, v| {
+                flushed.push((*k, v));
+                Ok::<(), ()>(())
+            })
+            .expect("cache corrupted or flush failed");
+
+        flushed.sort();
+        assert_eq!(flushed, [(2, 2), (3, 3)]);
+    }
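
The unit tests above pin down eviction order and dirty tracking. For orientation, here is a short editorial usage sketch (not part of the patch) of the write-back pattern this cache is built for, using only the LruCache API defined above; the key/value types and backing-store printlns are placeholders.

// Editorial sketch: typical write-back usage of the LruCache above.
use stacks_common::util::lru_cache::LruCache;

fn write_back_demo() {
    let mut cache: LruCache<u32, u64> = LruCache::new(2);

    // Dirty inserts are pending writes; clean inserts mirror already-persisted state.
    cache.insert(1, 100).expect("cache corrupted");
    cache.insert_clean(2, 200).expect("cache corrupted");

    // Inserting past capacity evicts the least recently used entry (key 1 here).
    // If that entry was dirty, the caller gets it back and must persist it.
    if let Some((k, v)) = cache.insert(3, 300).expect("cache corrupted") {
        println!("evicted dirty entry {k}={v}; write it to the backing store");
    }

    // flush drains every remaining dirty entry through the callback,
    // marking each one clean on success.
    cache
        .flush(|k, v| {
            println!("persist {k}={v}");
            Ok::<(), std::io::Error>(())
        })
        .expect("cache corrupted or flush failed");
}
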
+ + #[test] + fn test_lru_cache_capacity_one() { + let mut cache = LruCache::new(1); + + cache.insert(1, 1).expect("cache corrupted"); + assert_eq!(cache.get(&1).expect("cache corrupted"), Some(1)); + + cache.insert(2, 2).expect("cache corrupted"); + assert_eq!(cache.get(&1).expect("cache corrupted"), None); + assert_eq!(cache.get(&2).expect("cache corrupted"), Some(2)); + } + + #[test] + fn test_lru_cache_capacity_one_update() { + let mut cache = LruCache::new(1); + + cache.insert(1, 1).expect("cache corrupted"); + cache.insert(1, 2).expect("cache corrupted"); + assert_eq!(cache.get(&1).expect("cache corrupted"), Some(2)); + + cache.insert(2, 3).expect("cache corrupted"); + assert_eq!(cache.get(&1).expect("cache corrupted"), None); + assert_eq!(cache.get(&2).expect("cache corrupted"), Some(3)); + } + + #[test] + fn test_lru_cache_capacity_one_eviction() { + let mut cache = LruCache::new(1); + + assert!(cache.insert(1, 1).expect("cache corrupted").is_none()); + let evicted = cache + .insert(2, 2) + .expect("cache corrupted") + .expect("expected eviction"); + assert_eq!(evicted, (1, 1)); + } + + #[test] + fn test_lru_cache_capacity_one_flush() { + let mut cache = LruCache::new(1); + + cache.insert(1, 1).expect("cache corrupted"); + + let mut flushed = Vec::new(); + cache + .flush(|k, v| { + flushed.push((*k, v)); + Ok::<(), ()>(()) + }) + .expect("cache corrupted or flush failed"); + + assert_eq!(flushed, vec![(1, 1)]); + + cache.insert(2, 2).expect("cache corrupted"); + + let mut flushed = Vec::new(); + cache + .flush(|k, v| { + flushed.push((*k, v)); + Ok::<(), ()>(()) + }) + .expect("cache corrupted or flush failed"); + + assert_eq!(flushed, vec![(2, 2)]); + } + + /// Simple LRU implementation for property testing + pub struct SimpleLRU { + pub cache: Vec>, + capacity: usize, + } + + impl SimpleLRU { + pub fn new(capacity: usize) -> Self { + SimpleLRU { + cache: Vec::with_capacity(capacity), + capacity, + } + } + + pub fn insert(&mut self, key: u32, value: u32, dirty: bool) { + if let Some(pos) = self.cache.iter().position(|x| x.key == key) { + self.cache.remove(pos); + } else if self.cache.len() == self.capacity { + self.cache.remove(0); + } + self.cache.push(Node { + key, + value, + dirty, + next: 0, + prev: 0, + }); + } + + pub fn get(&mut self, key: u32) -> Option { + if let Some(pos) = self.cache.iter().position(|x| x.key == key) { + let node = self.cache.remove(pos); + let value = node.value; + self.cache.push(node); + Some(value) + } else { + None + } + } + + pub fn flush(&mut self, mut f: impl FnMut(&u32, u32) -> Result<(), E>) -> Result<(), E> { + for node in self.cache.iter_mut().rev() { + if node.dirty { + f(&node.key, node.value)?; + } + node.dirty = false; + } + Ok(()) + } + } +} + +#[cfg(test)] +mod property_tests { + use proptest::prelude::*; + + use super::tests::SimpleLRU; + use super::*; + + #[derive(Debug, Clone)] + enum CacheOp { + Insert(u32, u32), + Get(u32), + InsertClean(u32, u32), + Flush, + } + + prop_compose! { + fn arbitrary_op()(op_type in 0..4, key in 0..100u32, value in 0..1000u32) -> CacheOp { + match op_type { + 0 => CacheOp::Insert(key, value), + 1 => CacheOp::Get(key), + 2 => CacheOp::InsertClean(key, value), + _ => CacheOp::Flush, + } + } + } + + proptest! 
{ + #[test] + fn doesnt_crash_with_random_operations(ops in prop::collection::vec(arbitrary_op(), 1..1000)) { + let mut cache = LruCache::new(10); + for op in ops { + match op { + CacheOp::Insert(k, v) => { cache.insert(k, v).expect("cache corrupted"); } + CacheOp::Get(k) => { cache.get(&k).expect("cache corrupted"); } + CacheOp::InsertClean(k, v) => { cache.insert_clean(k, v).expect("cache corrupted"); } + CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).expect("cache corrupted or flush failed"); } + } + } + } + + #[test] + fn maintains_size_invariant(ops in prop::collection::vec(0..100u32, 1..1000)) { + let capacity = 10; + let mut cache = LruCache::new(capacity); + for op in ops { + cache.insert(op, op).expect("cache corrupted"); + prop_assert!(cache.cache.len() <= capacity); + prop_assert!(cache.order.len() <= capacity); + } + } + + #[test] + fn maintains_linked_list_integrity(ops in prop::collection::vec(arbitrary_op(), 1..1000)) { + let mut cache = LruCache::new(10); + for op in ops { + match op { + CacheOp::Insert(k, v) => { cache.insert(k, v).expect("cache corrupted"); } + CacheOp::Get(k) => { cache.get(&k).expect("cache corrupted"); } + CacheOp::InsertClean(k, v) => { cache.insert_clean(k, v).expect("cache corrupted"); } + CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).expect("cache corrupted or flush failed"); } + } + // Verify linked list integrity + if !cache.order.is_empty() { + let mut curr = cache.head; + let mut count = 0; + while curr != cache.capacity { + if count >= cache.order.len() { + prop_assert!(false, "Linked list cycle detected"); + } + if cache.order[curr].next != cache.capacity { + prop_assert_eq!(cache.order[cache.order[curr].next].prev, curr); + } + curr = cache.order[curr].next; + count += 1; + } + } + } + } + + #[test] + fn maintains_lru_correctness(ops in prop::collection::vec(arbitrary_op(), 1..1000)) { + let mut cache = LruCache::new(5); + let mut simple = SimpleLRU::new(5); + for op in ops { + match op { + CacheOp::Insert(k, v) => { + cache.insert(k, v).expect("cache corrupted"); + simple.insert(k, v, true); + } + CacheOp::Get(k) => { + let actual = cache.get(&k).expect("cache corrupted"); + let expected = simple.get(k); + prop_assert_eq!(actual, expected); + } + CacheOp::InsertClean(k, v) => { + cache.insert_clean(k, v).expect("cache corrupted"); + simple.insert(k, v, false); + } + CacheOp::Flush => { + let mut flushed = vec![]; + let mut simple_flushed = vec![]; + cache.flush(|k, v| { + flushed.push((*k, v)); + Ok::<(), ()>(()) + }).expect("cache corrupted or flush failed"); + simple.flush(|k, v| { + simple_flushed.push((*k, v)); + Ok::<(), ()>(()) + }).unwrap(); + flushed.sort(); + simple_flushed.sort(); + prop_assert_eq!(flushed, simple_flushed); + } + }; + + // The cache should have the same order as the simple LRU + let mut curr = cache.head; + let mut count = 0; + while curr != cache.capacity { + if count >= cache.order.len() { + prop_assert!(false, "Linked list cycle detected"); + } + let idx = simple.cache.len() - count - 1; + prop_assert_eq!(cache.order[curr].key, simple.cache[idx].key); + prop_assert_eq!(cache.order[curr].value, simple.cache[idx].value); + curr = cache.order[curr].next; + count += 1; + } + } + } + } +} diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 4e332179e6e..9e45a05994e 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -707,7 +707,7 @@ macro_rules! 
fmax { }} } -#[cfg(feature = "canonical")] +#[cfg(feature = "rusqlite")] macro_rules! impl_byte_array_rusqlite_only { ($thing:ident) => { impl rusqlite::types::FromSql for $thing { diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 46158d2f4f4..db80ed51e49 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -19,8 +19,10 @@ pub mod log; #[macro_use] pub mod macros; pub mod chunked_encoding; +#[cfg(feature = "rusqlite")] pub mod db; pub mod hash; +pub mod lru_cache; pub mod pair; pub mod pipe; pub mod retry; diff --git a/stacks-common/src/util/secp256k1/mod.rs b/stacks-common/src/util/secp256k1/mod.rs new file mode 100644 index 00000000000..50ee281e306 --- /dev/null +++ b/stacks-common/src/util/secp256k1/mod.rs @@ -0,0 +1,33 @@ +#[cfg(not(target_family = "wasm"))] +mod native; + +#[cfg(not(target_family = "wasm"))] +pub use self::native::*; + +#[cfg(target_family = "wasm")] +mod wasm; + +#[cfg(target_family = "wasm")] +pub use self::wasm::*; + +pub const MESSAGE_SIGNATURE_ENCODED_SIZE: u32 = 65; + +pub struct MessageSignature(pub [u8; 65]); +impl_array_newtype!(MessageSignature, u8, 65); +impl_array_hexstring_fmt!(MessageSignature); +impl_byte_array_newtype!(MessageSignature, u8, 65); +impl_byte_array_serde!(MessageSignature); + +pub struct SchnorrSignature(pub [u8; 65]); +impl_array_newtype!(SchnorrSignature, u8, 65); +impl_array_hexstring_fmt!(SchnorrSignature); +impl_byte_array_newtype!(SchnorrSignature, u8, 65); +impl_byte_array_serde!(SchnorrSignature); +pub const SCHNORR_SIGNATURE_ENCODED_SIZE: u32 = 65; + +impl Default for SchnorrSignature { + /// Creates a default Schnorr Signature. Note this is not a valid signature. + fn default() -> Self { + Self([0u8; 65]) + } +} diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1/native.rs similarity index 98% rename from stacks-common/src/util/secp256k1.rs rename to stacks-common/src/util/secp256k1/native.rs index e33ce4f1549..f547f7825da 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1/native.rs @@ -13,22 +13,24 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use rand::RngCore; -use secp256k1; -use secp256k1::ecdsa::{ + +use ::secp256k1; +use ::secp256k1::ecdsa::{ RecoverableSignature as LibSecp256k1RecoverableSignature, RecoveryId as LibSecp256k1RecoveryID, Signature as LibSecp256k1Signature, }; -use secp256k1::{ +pub use ::secp256k1::Error; +use ::secp256k1::{ constants as LibSecp256k1Constants, Error as LibSecp256k1Error, Message as LibSecp256k1Message, PublicKey as LibSecp256k1PublicKey, Secp256k1, SecretKey as LibSecp256k1PrivateKey, }; +use rand::RngCore; use serde::de::{Deserialize, Error as de_Error}; use serde::Serialize; -use super::hash::Sha256Sum; +use super::MessageSignature; use crate::types::{PrivateKey, PublicKey}; -use crate::util::hash::{hex_bytes, to_hex}; +use crate::util::hash::{hex_bytes, to_hex, Sha256Sum}; // per-thread Secp256k1 context thread_local!(static _secp256k1: Secp256k1 = Secp256k1::new()); @@ -55,13 +57,6 @@ pub struct Secp256k1PrivateKey { compress_public: bool, } -pub struct MessageSignature(pub [u8; 65]); -impl_array_newtype!(MessageSignature, u8, 65); -impl_array_hexstring_fmt!(MessageSignature); -impl_byte_array_newtype!(MessageSignature, u8, 65); -impl_byte_array_serde!(MessageSignature); -pub const MESSAGE_SIGNATURE_ENCODED_SIZE: u32 = 65; - impl MessageSignature { pub fn empty() -> MessageSignature { // NOTE: this cannot be a valid signature diff --git a/stacks-common/src/util/secp256k1/wasm.rs b/stacks-common/src/util/secp256k1/wasm.rs new file mode 100644 index 00000000000..bea3c5e2d5d --- /dev/null +++ b/stacks-common/src/util/secp256k1/wasm.rs @@ -0,0 +1,323 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use ::libsecp256k1; +pub use ::libsecp256k1::Error; +use ::libsecp256k1::{ + Error as LibSecp256k1Error, Message as LibSecp256k1Message, PublicKey as LibSecp256k1PublicKey, + RecoveryId as LibSecp256k1RecoveryId, SecretKey as LibSecp256k1PrivateKey, + Signature as LibSecp256k1Signature, +}; +use rand::RngCore; +use serde::de::{Deserialize, Error as de_Error}; +use serde::Serialize; + +use super::MessageSignature; +use crate::types::{PrivateKey, PublicKey}; +use crate::util::hash::{hex_bytes, to_hex}; + +pub const PUBLIC_KEY_SIZE: usize = 33; + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct Secp256k1PublicKey { + // serde is broken for secp256k1, so do it ourselves + #[serde( + serialize_with = "secp256k1_pubkey_serialize", + deserialize_with = "secp256k1_pubkey_deserialize" + )] + key: LibSecp256k1PublicKey, + compressed: bool, +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +pub struct Secp256k1PrivateKey { + // serde is broken for secp256k1, so do it ourselves + #[serde( + serialize_with = "secp256k1_privkey_serialize", + deserialize_with = "secp256k1_privkey_deserialize" + )] + key: LibSecp256k1PrivateKey, + compress_public: bool, +} + +impl Secp256k1PublicKey { + pub fn from_slice(data: &[u8]) -> Result { + let (format, compressed) = if data.len() == PUBLIC_KEY_SIZE { + (libsecp256k1::PublicKeyFormat::Compressed, true) + } else { + (libsecp256k1::PublicKeyFormat::Full, false) + }; + match LibSecp256k1PublicKey::parse_slice(data, Some(format)) { + Ok(pubkey_res) => Ok(Secp256k1PublicKey { + key: pubkey_res, + compressed, + }), + Err(_e) => Err("Invalid public key: failed to load"), + } + } + + pub fn to_hex(&self) -> String { + if self.compressed { + to_hex(&self.key.serialize_compressed().to_vec()) + } else { + to_hex(&self.key.serialize().to_vec()) + } + } + + pub fn to_bytes_compressed(&self) -> Vec { + self.key.serialize_compressed().to_vec() + } + + pub fn compressed(&self) -> bool { + self.compressed + } + + pub fn set_compressed(&mut self, value: bool) { + self.compressed = value; + } + + pub fn to_bytes(&self) -> Vec { + if self.compressed { + self.key.serialize_compressed().to_vec() + } else { + self.key.serialize().to_vec() + } + } + + pub fn from_hex(hex_string: &str) -> Result { + let data = hex_bytes(hex_string).map_err(|_e| "Failed to decode hex public key")?; + Secp256k1PublicKey::from_slice(&data[..]).map_err(|_e| "Invalid public key hex string") + } + + pub fn from_private(privk: &Secp256k1PrivateKey) -> Secp256k1PublicKey { + let key = LibSecp256k1PublicKey::from_secret_key(&privk.key); + Secp256k1PublicKey { + key, + compressed: privk.compress_public, + } + } + + /// recover message and signature to public key (will be compressed) + pub fn recover_to_pubkey( + msg: &[u8], + sig: &MessageSignature, + ) -> Result { + let secp256k1_sig = secp256k1_recover(msg, sig.as_bytes()) + .map_err(|_e| "Invalid signature: failed to recover public key")?; + + Secp256k1PublicKey::from_slice(&secp256k1_sig) + } +} + +impl Secp256k1PrivateKey { + pub fn new() -> Secp256k1PrivateKey { + let mut rng = rand::thread_rng(); + loop { + // keep trying to generate valid bytes + let mut random_32_bytes = [0u8; 32]; + rng.fill_bytes(&mut random_32_bytes); + let pk_res = LibSecp256k1PrivateKey::parse_slice(&random_32_bytes); + match pk_res { + Ok(pk) => { + return Secp256k1PrivateKey { + key: pk, + compress_public: true, + }; + } + Err(_) => { + continue; + } + } + } + } + + pub fn from_slice(data: &[u8]) -> Result { + if data.len() < 32 
{ + return Err("Invalid private key: shorter than 32 bytes"); + } + if data.len() > 33 { + return Err("Invalid private key: greater than 33 bytes"); + } + let compress_public = if data.len() == 33 { + // compressed byte tag? + if data[32] != 0x01 { + return Err("Invalid private key: invalid compressed byte marker"); + } + true + } else { + false + }; + + match LibSecp256k1PrivateKey::parse_slice(&data[0..32]) { + Ok(privkey_res) => Ok(Secp256k1PrivateKey { + key: privkey_res, + compress_public, + }), + Err(_e) => Err("Invalid private key: failed to load"), + } + } + + pub fn from_hex(hex_string: &str) -> Result { + let data = hex_bytes(hex_string).map_err(|_e| "Failed to decode hex private key")?; + Secp256k1PrivateKey::from_slice(&data[..]).map_err(|_e| "Invalid private key hex string") + } + + pub fn compress_public(&self) -> bool { + self.compress_public + } + + pub fn set_compress_public(&mut self, value: bool) { + self.compress_public = value; + } +} + +pub fn secp256k1_recover( + message_arr: &[u8], + serialized_signature: &[u8], +) -> Result<[u8; 33], LibSecp256k1Error> { + let recovery_id = libsecp256k1::RecoveryId::parse(serialized_signature[64] as u8)?; + let message = LibSecp256k1Message::parse_slice(message_arr)?; + let signature = LibSecp256k1Signature::parse_standard_slice(&serialized_signature[..64])?; + let recovered_pub_key = libsecp256k1::recover(&message, &signature, &recovery_id)?; + Ok(recovered_pub_key.serialize_compressed()) +} + +pub fn secp256k1_verify( + message_arr: &[u8], + serialized_signature: &[u8], + pubkey_arr: &[u8], +) -> Result<(), LibSecp256k1Error> { + let message = LibSecp256k1Message::parse_slice(message_arr)?; + let signature = LibSecp256k1Signature::parse_standard_slice(&serialized_signature[..64])?; // ignore 65th byte if present + let pubkey = LibSecp256k1PublicKey::parse_slice( + pubkey_arr, + Some(libsecp256k1::PublicKeyFormat::Compressed), + )?; + + let res = libsecp256k1::verify(&message, &signature, &pubkey); + if res { + Ok(()) + } else { + Err(LibSecp256k1Error::InvalidPublicKey) + } +} + +fn secp256k1_pubkey_serialize( + pubk: &LibSecp256k1PublicKey, + s: S, +) -> Result { + let key_hex = to_hex(&pubk.serialize().to_vec()); + s.serialize_str(&key_hex.as_str()) +} + +fn secp256k1_pubkey_deserialize<'de, D: serde::Deserializer<'de>>( + d: D, +) -> Result { + let key_hex = String::deserialize(d)?; + let key_bytes = hex_bytes(&key_hex).map_err(de_Error::custom)?; + + LibSecp256k1PublicKey::parse_slice(&key_bytes[..], None).map_err(de_Error::custom) +} + +fn secp256k1_privkey_serialize( + privk: &LibSecp256k1PrivateKey, + s: S, +) -> Result { + let key_hex = to_hex(&privk.serialize().to_vec()); + s.serialize_str(key_hex.as_str()) +} + +fn secp256k1_privkey_deserialize<'de, D: serde::Deserializer<'de>>( + d: D, +) -> Result { + let key_hex = String::deserialize(d)?; + let key_bytes = hex_bytes(&key_hex).map_err(de_Error::custom)?; + + LibSecp256k1PrivateKey::parse_slice(&key_bytes[..]).map_err(de_Error::custom) +} + +impl MessageSignature { + pub fn empty() -> MessageSignature { + // NOTE: this cannot be a valid signature + MessageSignature([0u8; 65]) + } + + #[cfg(test)] + // test method for generating place-holder data + pub fn from_raw(sig: &Vec) -> MessageSignature { + let mut buf = [0u8; 65]; + if sig.len() < 65 { + buf.copy_from_slice(&sig[..]); + } else { + buf.copy_from_slice(&sig[..65]); + } + MessageSignature(buf) + } + + pub fn from_secp256k1_recoverable( + sig: &LibSecp256k1Signature, + recid: LibSecp256k1RecoveryId, + ) -> 
MessageSignature { + let bytes = sig.serialize(); + let mut ret_bytes = [0u8; 65]; + let recovery_id_byte = recid.serialize(); // recovery ID will be 0, 1, 2, or 3 + ret_bytes[0] = recovery_id_byte; + ret_bytes[1..=64].copy_from_slice(&bytes[..64]); + MessageSignature(ret_bytes) + } + + pub fn to_secp256k1_recoverable( + &self, + ) -> Option<(LibSecp256k1Signature, LibSecp256k1RecoveryId)> { + let recovery_id = match LibSecp256k1RecoveryId::parse(self.0[0]) { + Ok(rid) => rid, + Err(_) => { + return None; + } + }; + let signature = LibSecp256k1Signature::parse_standard_slice(&self.0[1..65]).ok()?; + Some((signature, recovery_id)) + } +} + +impl PublicKey for Secp256k1PublicKey { + fn to_bytes(&self) -> Vec { + self.to_bytes() + } + + fn verify(&self, data_hash: &[u8], sig: &MessageSignature) -> Result { + let pub_key = Secp256k1PublicKey::recover_to_pubkey(data_hash, sig)?; + Ok(self.eq(&pub_key)) + } +} + +impl PrivateKey for Secp256k1PrivateKey { + fn to_bytes(&self) -> Vec { + let mut bits = self.key.serialize().to_vec(); + if self.compress_public { + bits.push(0x01); + } + bits + } + + fn sign(&self, data_hash: &[u8]) -> Result { + let message = LibSecp256k1Message::parse_slice(data_hash) + .map_err(|_e| "Invalid message: failed to decode data hash: must be a 32-byte hash")?; + let (sig, recid) = libsecp256k1::sign(&message, &self.key); + let rec_sig = MessageSignature::from_secp256k1_recoverable(&sig, recid); + Ok(rec_sig) + } +} diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index 5c7439daf94..bd124a5da0d 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -26,7 +26,7 @@ use std::{error, fmt}; use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint}; -use curve25519_dalek::scalar::Scalar as ed25519_Scalar; +use curve25519_dalek::scalar::{clamp_integer, Scalar as ed25519_Scalar}; use rand; use sha2::{Digest, Sha512}; @@ -181,6 +181,7 @@ impl VRFPublicKey { pub enum Error { InvalidPublicKey, InvalidDataError, + InvalidHashPoints, OSRNGError(rand::Error), } @@ -189,6 +190,7 @@ impl fmt::Display for Error { match *self { Error::InvalidPublicKey => write!(f, "Invalid public key"), Error::InvalidDataError => write!(f, "No data could be found"), + Error::InvalidHashPoints => write!(f, "VRF hash points did not yield a valid scalar"), Error::OSRNGError(ref e) => fmt::Display::fmt(e, f), } } @@ -199,6 +201,7 @@ impl error::Error for Error { match *self { Error::InvalidPublicKey => None, Error::InvalidDataError => None, + Error::InvalidHashPoints => None, Error::OSRNGError(ref e) => Some(e), } } @@ -246,7 +249,7 @@ impl VRFProof { #[allow(clippy::needless_range_loop)] pub fn check_c(c: &ed25519_Scalar) -> bool { - let c_bytes = c.reduce().to_bytes(); + let c_bytes = c.to_bytes(); // upper 16 bytes of c must be 0's for c_byte in c_bytes[16..32].iter() { @@ -281,7 +284,9 @@ impl VRFProof { // 0 32 48 80 // |----------------------------|----------|---------------------------| // Gamma point c scalar s scalar - let gamma_opt = CompressedEdwardsY::from_slice(&bytes[0..32]).decompress(); + let gamma_opt = CompressedEdwardsY::from_slice(&bytes[0..32]) + .ok() + .and_then(|y| y.decompress()); if gamma_opt.is_none() { test_debug!("Invalid Gamma"); return None; @@ -297,10 +302,14 @@ impl VRFProof { c_buf[..16].copy_from_slice(&bytes[32..(16 + 32)]); s_buf[..32].copy_from_slice(&bytes[48..(32 + 48)]); - let c = ed25519_Scalar::from_canonical_bytes(c_buf)?; - let s = 
ed25519_Scalar::from_canonical_bytes(s_buf)?; - - Some(VRFProof { Gamma: gamma, c, s }) + let c: Option = ed25519_Scalar::from_canonical_bytes(c_buf).into(); + let s: Option = ed25519_Scalar::from_canonical_bytes(s_buf).into(); + + Some(VRFProof { + Gamma: gamma, + c: c?, + s: s?, + }) } _ => None, } @@ -324,7 +333,7 @@ impl VRFProof { "FATAL ERROR: somehow constructed an invalid ECVRF proof" ); - let c_bytes = self.c.reduce().to_bytes(); + let c_bytes = self.c.to_bytes(); c_bytes_16[0..16].copy_from_slice(&c_bytes[0..16]); let gamma_bytes = self.Gamma.compress().to_bytes(); @@ -386,7 +395,7 @@ impl VRF { } let y = CompressedEdwardsY::from_slice(&hasher.finalize()[0..32]); - if let Some(h) = y.decompress() { + if let Some(h) = y.ok().and_then(|y| y.decompress()) { break h; } @@ -445,8 +454,7 @@ impl VRF { let mut h_32 = [0u8; 32]; h_32.copy_from_slice(&h[0..32]); - let x_scalar_raw = ed25519_Scalar::from_bits(h_32); - let x_scalar = x_scalar_raw.reduce(); // use the canonical scalar for the private key + let x_scalar = ed25519_Scalar::from_bytes_mod_order(clamp_integer(h_32)); trunc_hash.copy_from_slice(&h[32..64]); @@ -469,17 +477,17 @@ impl VRF { /// Convert a 16-byte string into a scalar. /// The upper 16 bytes in the resulting scalar MUST BE 0's - fn ed25519_scalar_from_hash128(hash128: &[u8; 16]) -> ed25519_Scalar { + fn ed25519_scalar_from_hash128(hash128: &[u8; 16]) -> Option { let mut scalar_buf = [0u8; 32]; scalar_buf[0..16].copy_from_slice(hash128); - ed25519_Scalar::from_bits(scalar_buf) + ed25519_Scalar::from_canonical_bytes(scalar_buf).into() } /// ECVRF proof routine /// https://tools.ietf.org/id/draft-irtf-cfrg-vrf-02.html#rfc.section.5.1 #[allow(clippy::op_ref)] - pub fn prove(secret: &VRFPrivateKey, alpha: &[u8]) -> VRFProof { + pub fn prove(secret: &VRFPrivateKey, alpha: &[u8]) -> Option { let (Y_point, x_scalar, trunc_hash) = VRF::expand_privkey(secret); let H_point = VRF::hash_to_curve(&Y_point, alpha); @@ -490,15 +498,15 @@ impl VRF { let kH_point = &k_scalar * &H_point; let c_hashbuf = VRF::hash_points(&H_point, &Gamma_point, &kB_point, &kH_point); - let c_scalar = VRF::ed25519_scalar_from_hash128(&c_hashbuf); + let c_scalar = VRF::ed25519_scalar_from_hash128(&c_hashbuf)?; - let s_full_scalar = &k_scalar + &c_scalar * &x_scalar; - let s_scalar = s_full_scalar.reduce(); + let s_scalar = &k_scalar + &c_scalar * &x_scalar; // NOTE: expect() won't panic because c_scalar is guaranteed to have // its upper 16 bytes as 0 VRFProof::new(Gamma_point, c_scalar, s_scalar) - .expect("FATAL ERROR: upper-16 bytes of proof's C scalar are NOT 0") + .inspect_err(|e| error!("FATAL: upper-16 bytes of proof's C scalar are NOT 0: {e}")) + .ok() } /// Given a public key, verify that the private key owner that generate the ECVRF proof did so on the given message. 
@@ -509,7 +517,7 @@ impl VRF { #[allow(clippy::op_ref)] pub fn verify(Y_point: &VRFPublicKey, proof: &VRFProof, alpha: &[u8]) -> Result<bool, Error> { let H_point = VRF::hash_to_curve(Y_point, alpha); - let s_reduced = proof.s().reduce(); + let s_reduced = proof.s(); let Y_point_ed = CompressedEdwardsY(Y_point.to_bytes()) .decompress() .ok_or(Error::InvalidPublicKey)?; @@ -517,11 +525,13 @@ impl VRF { return Err(Error::InvalidPublicKey); } - let U_point = &s_reduced * &ED25519_BASEPOINT_POINT - proof.c() * Y_point_ed; - let V_point = &s_reduced * &H_point - proof.c() * proof.Gamma(); + let U_point = s_reduced * &ED25519_BASEPOINT_POINT - proof.c() * Y_point_ed; + let V_point = s_reduced * &H_point - proof.c() * proof.Gamma(); let c_prime_hashbuf = VRF::hash_points(&H_point, proof.Gamma(), &U_point, &V_point); - let c_prime = VRF::ed25519_scalar_from_hash128(&c_prime_hashbuf); + let Some(c_prime) = VRF::ed25519_scalar_from_hash128(&c_prime_hashbuf) else { + return Err(Error::InvalidHashPoints); + }; // NOTE: this leverages constant-time comparison inherited from the Scalar impl Ok(c_prime == *(proof.c())) @@ -583,7 +593,7 @@ mod tests { let privk = VRFPrivateKey::from_bytes(&proof_fixture.privkey[..]).unwrap(); let expected_proof_bytes = &proof_fixture.proof[..]; - let proof = VRF::prove(&privk, &alpha.to_vec()); + let proof = VRF::prove(&privk, &alpha.to_vec()).unwrap(); let proof_bytes = proof.to_bytes(); assert_eq!(proof_bytes.to_vec(), expected_proof_bytes.to_vec()); @@ -605,7 +615,7 @@ mod tests { let mut msg = [0u8; 1024]; rng.fill_bytes(&mut msg); - let proof = VRF::prove(&secret_key, &msg); + let proof = VRF::prove(&secret_key, &msg).unwrap(); let res = VRF::verify(&public_key, &proof, &msg).unwrap(); assert!(res); diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index e8307b48374..35ae5d07c40 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -5,9 +5,15 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Changed + +- For some rejection reasons, a signer will reconsider a block proposal that it previously rejected ([#5880](https://github.com/stacks-network/stacks-core/pull/5880)) + ## [3.1.0.0.7.0] -## Changed +### Changed - Add new reject codes to the signer response for better visibility into why a block was rejected. - When allowing a reorg within the `reorg_attempts_activity_timeout_ms`, the signer will now watch the responses from other signers and if >30% of them reject this reorg attempt, then the signer will mark the miner as invalid, reject further attempts to reorg and allow the previous miner to extend their tenure. @@ -18,7 +24,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [3.1.0.0.6.0] -## Added +### Added - Introduced the `reorg_attempts_activity_timeout_ms` configuration option for signers which is used to determine the length of time after the last block of a tenure is confirmed that an incoming miner's attempts to reorg it are considered valid miner activity. - Add signer configuration option `tenure_idle_timeout_buffer_secs` to specify the number of seconds of buffer the signer will add to its tenure extend time that it sends to miners. The idea is to allow for some clock skew between the miner and signers, preventing the case where the miner attempts to tenure extend too early. 
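The vrf.rs changes above make `VRF::prove` fallible: under curve25519-dalek v4, a hash-derived scalar can fail canonical-bytes validation, so the proof routine now returns `Option<VRFProof>` instead of panicking, and `verify` surfaces the same condition as `Error::InvalidHashPoints`. A minimal sketch of the resulting caller pattern, mirroring the updated tests (editor's illustration, not part of the patch; the key and message values are placeholders):

```rust
use stacks_common::util::vrf::{VRFPrivateKey, VRFPublicKey, VRF};

fn prove_and_verify(alpha: &[u8]) -> bool {
    let secret_key = VRFPrivateKey::new();
    let public_key = VRFPublicKey::from_private(&secret_key);
    // `prove` now returns None if the derived c/s scalars are not canonical;
    // callers must handle that instead of relying on the old infallible API.
    let Some(proof) = VRF::prove(&secret_key, alpha) else {
        return false;
    };
    // `verify` reports the equivalent failure through its Result.
    VRF::verify(&public_key, &proof, alpha).unwrap_or(false)
}
```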
diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index eb58164a6e6..19687e0c09e 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -61,5 +61,6 @@ version = "0.24.3" features = ["serde", "recovery"] [features] +default = [] monitoring_prom = ["libsigner/monitoring_prom", "prometheus", "tiny_http"] testing = [] diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index f6184192122..0f51c05fc56 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -20,7 +20,6 @@ use blockstack_lib::chainstate::stacks::TenureChangePayload; use blockstack_lib::net::api::getsortition::SortitionInfo; use blockstack_lib::util_lib::db::Error as DBError; use libsigner::v0::messages::RejectReason; -use slog::{slog_info, slog_warn}; use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash, StacksPublicKey}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Hash160; @@ -39,6 +38,12 @@ pub enum SignerChainstateError { /// Error resulting from crate::client interactions #[error("Client error: {0}")] ClientError(#[from] ClientError), + /// The signer could not find information about the parent tenure + #[error("No information available for parent tenure '{0}'")] + NoParentTenureInfo(ConsensusHash), + /// The local state machine wasn't ready to be queried + #[error("The local state machine is not ready, so no update message can be produced")] + LocalStateMachineNotReady, } impl From<SignerChainstateError> for RejectReason { @@ -284,7 +289,7 @@ impl SortitionsView { warn!( "Miner block proposal has bitvec field which punishes in disagreement with signer. Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); @@ -322,7 +327,7 @@ impl SortitionsView { warn!( "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); @@ -333,7 +338,7 @@ impl SortitionsView { warn!( "Miner block proposal pubkey does not match the winning pubkey hash for its sortition. 
Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_block_pubkey" => &block_pk.to_hex(), "proposed_block_pubkey_hash" => %block_pkh, "sortition_winner_pubkey_hash" => %proposed_by.state().miner_pkh, @@ -348,7 +353,7 @@ impl SortitionsView { warn!( "Current miner behaved improperly, this signer views the miner as invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), ); return Err(RejectReason::InvalidMiner); } @@ -362,7 +367,7 @@ impl SortitionsView { warn!( "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "current_sortition_miner_status" => ?self.cur_sortition.miner_status, "last_sortition" => %last_sortition.consensus_hash ); @@ -402,12 +407,12 @@ impl SortitionsView { false, ); let epoch_time = get_epoch_time_secs(); - let enough_time_passed = epoch_time > extend_timestamp; + let enough_time_passed = epoch_time >= extend_timestamp; if !changed_burn_view && !enough_time_passed { warn!( "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "extend_timestamp" => extend_timestamp, "epoch_time" => epoch_time, ); @@ -418,7 +423,10 @@ impl SortitionsView { Ok(()) } - fn check_parent_tenure_choice( + /// Check if the tenure defined by `sortition_state` is building off of an + /// appropriate tenure. Note that this does not check that it confirms the correct + /// number of blocks from that tenure! + pub fn check_parent_tenure_choice( sortition_state: &SortitionState, block: &NakamotoBlock, signer_db: &SignerDb, @@ -435,7 +443,7 @@ impl SortitionsView { info!( "Most recent miner's tenure does not build off the prior sortition, checking if this is valid behavior"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "sortition_state.consensus_hash" => %sortition_state.consensus_hash, "sortition_state.prior_sortition" => %sortition_state.prior_sortition, "sortition_state.parent_tenure_id" => %sortition_state.parent_tenure_id, @@ -449,7 +457,7 @@ impl SortitionsView { if tenures_reorged.is_empty() { warn!("Miner is not building off of most recent tenure, but stacks node was unable to return information about the relevant sortitions. 
Marking miner invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), ); return Ok(false); } @@ -471,7 +479,7 @@ impl SortitionsView { warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already more than one globally accepted block."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -488,7 +496,7 @@ impl SortitionsView { warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks, and there is no local knowledge for that tenure's block timing."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -515,7 +523,7 @@ impl SortitionsView { info!( "Miner is not building off of most recent tenure. A tenure they reorg has already mined blocks, but the block was poorly timed, allowing the reorg."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_block_height" => block.header.chain_length, "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, @@ -537,7 +545,7 @@ impl SortitionsView { warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -612,7 +620,7 @@ impl SortitionsView { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, "expected_at_least" => info.block.header.chain_length + 1, ); @@ -641,7 +649,7 @@ impl SortitionsView { warn!( "Miner block proposal contains a tenure change, but failed to fetch the tenure tip for the parent tenure: {e:?}. 
Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %tenure_change.prev_tenure_consensus_hash, ); return Ok(false); @@ -669,7 +677,7 @@ impl SortitionsView { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, "expected_at_least" => tip_height + 1, ); @@ -722,8 +730,8 @@ impl SortitionsView { warn!( "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "last_in_tenure_signer_sighash" => %last_in_current_tenure.block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "last_in_tenure_signer_signature_hash" => %last_in_current_tenure.block.header.signer_signature_hash(), ); return Err(RejectReason::DuplicateBlockFound); } @@ -741,7 +749,7 @@ impl SortitionsView { info!( "Have no accepted blocks in the tenure, assuming block confirmation is correct"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_block_height" => block.header.chain_length, ); return Ok(true); @@ -752,7 +760,7 @@ impl SortitionsView { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, "expected_at_least" => last_known_block.block.header.chain_length + 1, ); diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index ab8ad610562..59b8309d536 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -25,7 +25,6 @@ use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::serialization::SerializationError; use libsigner::RPCError; use libstackerdb::Error as StackerDBError; -use slog::slog_debug; pub use stackerdb::*; pub use stacks_client::*; use stacks_common::codec::Error as CodecError; diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 222eda72df8..a08ff727b4a 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -19,7 +19,6 @@ use clarity::codec::read_next; use hashbrown::HashMap; use libsigner::{MessageSlotID, SignerMessage, SignerSession, StackerDBSession}; use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; -use slog::{slog_debug, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::util::hash::to_hex; use stacks_common::{debug, info, warn}; diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index db0b356fb40..38c0a66bbbd 100644 --- 
a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -43,7 +43,6 @@ use libsigner::v0::messages::PeerInfo; use reqwest::header::AUTHORIZATION; use serde::Deserialize; use serde_json::json; -use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::CHAIN_ID_MAINNET; use stacks_common::types::chainstate::{ @@ -168,17 +167,17 @@ impl StacksClient { &self.stacks_address } - /// Get the stacks tip header of the tenure given its consensus hash + /// Get the header of the highest known block in the given tenure pub fn get_tenure_tip( &self, - consensus_hash: &ConsensusHash, + tenure_id: &ConsensusHash, ) -> Result<StacksBlockHeaderTypes, ClientError> { debug!("StacksClient: Getting tenure tip"; - "consensus_hash" => %consensus_hash, + "consensus_hash" => %tenure_id, ); let send_request = || { self.stacks_node_client - .get(self.tenure_tip_path(consensus_hash)) + .get(self.tenure_tip_path(tenure_id)) .send() .map_err(|e| { warn!("Signer failed to request latest sortition"; "err" => ?e); @@ -315,7 +314,7 @@ impl StacksClient { /// Submit the block proposal to the stacks node. The block will be validated and returned via the HTTP endpoint for Block events. pub fn submit_block_for_validation(&self, block: NakamotoBlock) -> Result<(), ClientError> { debug!("StacksClient: Submitting block for validation"; - "signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); @@ -599,7 +598,7 @@ impl StacksClient { /// In tests, this panics if the retry takes longer than 30 seconds. pub fn post_block_until_ok<F: Display>(&self, log_fmt: &F, block: &NakamotoBlock) -> bool { debug!("StacksClient: Posting block to stacks node"; - "signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); @@ -629,7 +628,7 @@ impl StacksClient { /// was rejected. 
pub fn post_block(&self, block: &NakamotoBlock) -> Result<bool, ClientError> { debug!("StacksClient: Posting block to the stacks node"; - "signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 9f2df125341..1c69132e79d 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -52,8 +52,8 @@ use chainstate::SortitionsView; use config::GlobalConfig; use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait, VERSION_STRING}; use runloop::SignerResult; -use slog::{slog_info, slog_warn}; use stacks_common::{info, warn}; +use v0::signer_state::LocalStateMachine; use crate::client::StacksClient; use crate::config::SignerConfig; @@ -62,7 +62,7 @@ use crate::runloop::RunLoop; /// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer<T: SignerEventTrait>: Debug + Display { /// Create a new `Signer` instance - fn new(config: SignerConfig) -> Self; + fn new(stacks_client: &StacksClient, signer_config: SignerConfig) -> Self; /// Get the reward cycle of the signer fn reward_cycle(&self) -> u64; /// Process an event @@ -71,26 +71,28 @@ pub trait Signer<T: SignerEventTrait>: Debug + Display { stacks_client: &StacksClient, sortition_state: &mut Option<SortitionsView>, event: Option<&SignerEvent<T>>, - res: &Sender<Vec<SignerResult>>, + res: &Sender<SignerResult>, current_reward_cycle: u64, ); /// Check if the signer is in the middle of processing blocks fn has_unprocessed_blocks(&self) -> bool; + /// Get a reference to the local state machine of the signer + fn get_local_state_machine(&self) -> &LocalStateMachine; } /// A wrapper around the running signer type for the signer -pub type RunningSigner<T> = libsigner::RunningSigner<SignerEventReceiver<T>, Vec<SignerResult>, T>; +pub type RunningSigner<T> = libsigner::RunningSigner<SignerEventReceiver<T>, SignerResult, T>; /// The wrapper for the runloop signer type type RunLoopSigner<S, T> = - libsigner::Signer<Vec<SignerResult>, RunLoop<S, T>, SignerEventReceiver<T>, T>; + libsigner::Signer<SignerResult, RunLoop<S, T>, SignerEventReceiver<T>, T>; /// The spawned signer pub struct SpawnedSigner<S: Signer<T> + Send, T: SignerEventTrait> { /// The underlying running signer thread handle running_signer: RunningSigner<T>, /// The result receiver for interacting with the running signer - pub res_recv: Receiver<Vec<SignerResult>>, + pub res_recv: Receiver<SignerResult>, /// The spawned signer's config pub config: GlobalConfig, /// Phantom data for the signer type @@ -99,12 +101,12 @@ pub struct SpawnedSigner<S: Signer<T> + Send, T: SignerEventTrait> { impl<S: Signer<T> + Send, T: SignerEventTrait> SpawnedSigner<S, T> { /// Stop the signer thread and return the final state - pub fn stop(self) -> Option<Vec<SignerResult>> { + pub fn stop(self) -> Option<SignerResult> { self.running_signer.stop() } /// Wait for the signer to terminate, and get the final state. WARNING: This will hang forever if the event receiver stop signal was never sent/no error occurred. 
- pub fn join(self) -> Option<Vec<SignerResult>> { + pub fn join(self) -> Option<SignerResult> { self.running_signer.join() } } diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 821f2e1c6ec..2e1bf771e6f 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -34,7 +34,6 @@ use clarity::types::chainstate::StacksPublicKey; use clarity::util::sleep_ms; use libsigner::{SignerSession, VERSION_STRING}; use libstackerdb::StackerDBChunkData; -use slog::{slog_debug, slog_error}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, error}; diff --git a/stacks-signer/src/monitor_signers.rs b/stacks-signer/src/monitor_signers.rs index 65b4fdda3e4..bbd59f00ea0 100644 --- a/stacks-signer/src/monitor_signers.rs +++ b/stacks-signer/src/monitor_signers.rs @@ -21,7 +21,6 @@ use clarity::types::StacksEpochId; use clarity::util::sleep_ms; use libsigner::v0::messages::{MessageSlotID, SignerMessage}; use libsigner::SignerSession; -use slog::{slog_info, slog_warn}; use stacks_common::{info, warn}; use crate::cli::MonitorSignersArgs; diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 60a530acabd..88c61c18b72 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -25,11 +25,11 @@ mod server; pub mod actions { use ::prometheus::HistogramTimer; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; - use slog::slog_error; use stacks_common::error; use crate::config::GlobalConfig; use crate::monitoring::prometheus::*; + use crate::v0::signer_state::LocalStateMachine; /// Update stacks tip height gauge pub fn update_stacks_tip_height(height: i64) { @@ -100,6 +100,14 @@ pub mod actions { .observe(latency_ms as f64 / 1000.0); } + /// Record the current local state machine + pub fn record_local_state(state: LocalStateMachine) { + SIGNER_LOCAL_STATE_MACHINE + .lock() + .expect("Local state machine lock poisoned") + .replace(state); + } + /// Start serving monitoring metrics. /// This will only serve the metrics if the `monitoring_prom` feature is enabled. pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), String> { @@ -121,9 +129,9 @@ pub mod actions { #[cfg(not(feature = "monitoring_prom"))] pub mod actions { use blockstack_lib::chainstate::nakamoto::NakamotoBlock; - use slog::slog_info; use stacks_common::info; + use crate::v0::signer_state::LocalStateMachine; use crate::GlobalConfig; /// Update stacks tip height gauge @@ -168,6 +176,9 @@ pub mod actions { /// Record the time taken to validate a block, as reported by the Stacks node. pub fn record_block_validation_latency(_latency_ms: u64) {} + /// Record the current local state machine + pub fn record_local_state(_state: LocalStateMachine) {} + /// Start serving monitoring metrics. /// This will only serve the metrics if the `monitoring_prom` feature is enabled. pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), String> { diff --git a/stacks-signer/src/monitoring/prometheus.rs b/stacks-signer/src/monitoring/prometheus.rs index 49f74ba1e88..048edb051d7 100644 --- a/stacks-signer/src/monitoring/prometheus.rs +++ b/stacks-signer/src/monitoring/prometheus.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. 
+use std::sync::Mutex; + use lazy_static::lazy_static; use prometheus::{ gather, histogram_opts, opts, register_histogram_vec, register_int_counter, @@ -21,6 +23,8 @@ IntGauge, TextEncoder, }; +use crate::v0::signer_state::LocalStateMachine; + lazy_static! { pub static ref STACKS_TIP_HEIGHT_GAUGE: IntGauge = register_int_gauge!(opts!( "stacks_signer_stacks_node_height", @@ -74,6 +78,8 @@ lazy_static! { "Time (seconds) measuring end-to-end time to respond to a block", vec![0.005, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 20.0, 30.0, 60.0, 120.0] ), &[]).unwrap(); + + pub static ref SIGNER_LOCAL_STATE_MACHINE: Mutex<Option<LocalStateMachine>> = Mutex::new(None); } pub fn gather_metrics_string() -> String { diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs index 0e584eec58f..2b20b9131c0 100644 --- a/stacks-signer/src/monitoring/server.rs +++ b/stacks-signer/src/monitoring/server.rs @@ -20,7 +20,6 @@ use std::time::Instant; use clarity::util::hash::to_hex; use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::VERSION_STRING; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::{debug, error, info, warn}; use tiny_http::{Response as HttpResponse, Server as HttpServer}; diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index f8bb4acac99..0bb8cd651b5 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -20,12 +20,12 @@ use std::time::Duration; use clarity::codec::StacksMessageCodec; use hashbrown::HashMap; use libsigner::{SignerEntries, SignerEvent, SignerRunLoop}; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::{debug, error, info, warn}; use crate::chainstate::SortitionsView; use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; use crate::config::{GlobalConfig, SignerConfig, SignerConfigMode}; +use crate::v0::signer_state::LocalStateMachine; #[cfg(any(test, feature = "testing"))] use crate::v0::tests::TEST_SKIP_SIGNER_CLEANUP; use crate::Signer as SignerTrait; @@ -53,6 +53,9 @@ pub struct StateInfo { pub reward_cycle_info: Option<RewardCycleInfo>, /// The current running signers reward cycles pub running_signers: Vec<u64>, + /// The local state machines for the running signers + /// as a pair of (reward-cycle, state-machine) + pub signer_state_machines: Vec<(u64, Option<LocalStateMachine>)>, } /// The signer result that can be sent across threads @@ -326,7 +329,7 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> RunLo let new_signer_config = match self.get_signer_config(reward_cycle) { Ok(Some(new_signer_config)) => { let signer_mode = new_signer_config.signer_mode.clone(); - let new_signer = Signer::new(new_signer_config); + let new_signer = Signer::new(&self.stacks_client, new_signer_config); info!("{new_signer} Signer is registered for reward cycle {reward_cycle} as {signer_mode}. Initialized signer state."); ConfiguredSigner::RegisteredSigner(new_signer) } @@ -477,7 +480,7 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> - SignerRunLoop<Vec<SignerResult>, T> for RunLoop<Signer, T> + SignerRunLoop<SignerResult, T> for RunLoop<Signer, T> { fn set_event_timeout(&mut self, timeout: Duration) { self.config.event_timeout = timeout; } @@ -490,16 +493,16 @@ fn run_one_pass( &mut self, event: Option<SignerEvent<T>>, - res: &Sender<Vec<SignerResult>>, - ) -> Option<Vec<SignerResult>> { + res: &Sender<SignerResult>, + ) -> Option<SignerResult> { debug!( "Running one pass for the signer. 
state={:?}, event={event:?}", self.state ); + // This is the only event that we respond to from the outer signer runloop if let Some(SignerEvent::StatusCheck) = event { - info!("Signer status check requested: {:?}.", self.state); - if let Err(e) = res.send(vec![StateInfo { + let state_info = StateInfo { runloop_state: self.state, reward_cycle_info: self.current_reward_cycle_info, running_signers: self @@ -507,9 +510,23 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> .values() .map(|s| s.reward_cycle()) .collect(), - } - .into()]) - { + signer_state_machines: self + .stacks_signers + .iter() + .map(|(reward_cycle, signer)| { + let ConfiguredSigner::RegisteredSigner(ref signer) = signer else { + return (*reward_cycle, None); + }; + ( + *reward_cycle, + Some(signer.get_local_state_machine().clone()), + ) + }) + .collect(), + }; + info!("Signer status check requested: {state_info:?}"); + + if let Err(e) = res.send(state_info.into()) { error!("Failed to send status check result: {e}."); } } @@ -528,6 +545,7 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> warn!("Signer may have an outdated view of the network."); } } + let current_reward_cycle = self .current_reward_cycle_info .as_ref() @@ -547,6 +565,7 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> current_reward_cycle, ); } + if self.state == State::NoRegisteredSigners && event.is_some() { let next_reward_cycle = current_reward_cycle.saturating_add(1); info!("Signer is not registered for the current reward cycle ({current_reward_cycle}). Reward set is not yet determined or signer is not registered for the upcoming reward cycle ({next_reward_cycle})."); diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index aa9b024643b..39f3b54d68f 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -35,7 +35,6 @@ use rusqlite::{ params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction, }; use serde::{Deserialize, Serialize}; -use slog::{slog_debug, slog_error}; use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::get_epoch_time_secs; @@ -167,6 +166,8 @@ pub struct BlockInfo { pub validation_time_ms: Option<u64>, /// Extra data specific to v0, v1, etc. pub ext: ExtraBlockInfo, + /// If this signer rejected this block, what was the reason + pub reject_reason: Option<RejectReason>, } impl From<BlockProposal> for BlockInfo { @@ -184,6 +185,7 @@ impl From<BlockProposal> for BlockInfo { ext: ExtraBlockInfo::default(), state: BlockState::Unprocessed, validation_time_ms: None, + reject_reason: None, } } } @@ -507,6 +509,15 @@ ALTER TABLE block_rejection_signer_addrs ADD COLUMN reject_code INTEGER; "#; +static ADD_CONSENSUS_HASH: &str = r#" +ALTER TABLE burn_blocks + ADD COLUMN consensus_hash TEXT; +"#; + +static ADD_CONSENSUS_HASH_INDEX: &str = r#" +CREATE INDEX IF NOT EXISTS burn_blocks_ch on burn_blocks (consensus_hash); +"#; + static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, CREATE_DB_CONFIG, @@ -576,9 +587,15 @@ static SCHEMA_9: &[&str] = &[ "INSERT INTO db_config (version) VALUES (9);", ]; +static SCHEMA_10: &[&str] = &[ + ADD_CONSENSUS_HASH, + ADD_CONSENSUS_HASH_INDEX, + "INSERT INTO db_config (version) VALUES (10);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. - pub const SCHEMA_VERSION: u32 = 9; + pub const SCHEMA_VERSION: u32 = 10; /// Create a new `SignerState` instance. 
/// This will create a new SQLite database at the given path @@ -720,7 +737,7 @@ impl SignerDb { Ok(()) } - /// Migrate from schema 9 to schema 9 + /// Migrate from schema 8 to schema 9 fn schema_9_migration(tx: &Transaction) -> Result<(), DBError> { if Self::get_schema_version(tx)? >= 9 { // no migration necessary @@ -734,6 +751,20 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 9 to schema 10 + fn schema_10_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 10 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_10.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Register custom scalar functions used by the database fn register_scalar_functions(&self) -> Result<(), DBError> { // Register helper function for determining if a block is a tenure change transaction @@ -776,7 +807,8 @@ impl SignerDb { 6 => Self::schema_7_migration(&sql_tx)?, 7 => Self::schema_8_migration(&sql_tx)?, 8 => Self::schema_9_migration(&sql_tx)?, - 9 => break, + 9 => Self::schema_10_migration(&sql_tx)?, + 10 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -908,6 +940,7 @@ impl SignerDb { pub fn insert_burn_block( &mut self, burn_hash: &BurnchainHeaderHash, + consensus_hash: &ConsensusHash, burn_height: u64, received_time: &SystemTime, ) -> Result<(), DBError> { @@ -915,11 +948,12 @@ impl SignerDb { .duration_since(std::time::UNIX_EPOCH) .map_err(|e| DBError::Other(format!("Bad system time: {e}")))? .as_secs(); - debug!("Inserting burn block info"; "burn_block_height" => burn_height, "burn_hash" => %burn_hash, "received" => received_ts); + debug!("Inserting burn block info"; "burn_block_height" => burn_height, "burn_hash" => %burn_hash, "received" => received_ts, "ch" => %consensus_hash); self.db.execute( - "INSERT OR REPLACE INTO burn_blocks (block_hash, block_height, received_time) VALUES (?1, ?2, ?3)", + "INSERT OR REPLACE INTO burn_blocks (block_hash, consensus_hash, block_height, received_time) VALUES (?1, ?2, ?3, ?4)", params![ burn_hash, + consensus_hash, u64_to_sql(burn_height)?, u64_to_sql(received_ts)?, ], @@ -927,7 +961,7 @@ impl SignerDb { Ok(()) } - /// Get timestamp (epoch seconds) at which a burn block was received over the event dispatcheer by this signer + /// Get timestamp (epoch seconds) at which a burn block was received over the event dispatcher by this signer /// if that burn block has been received. pub fn get_burn_block_receive_time( &self, @@ -944,6 +978,23 @@ impl SignerDb { Ok(Some(receive_time)) } + /// Get timestamp (epoch seconds) at which a burn block was received over the event dispatcher by this signer + /// if that burn block has been received. + pub fn get_burn_block_receive_time_ch( + &self, + ch: &ConsensusHash, + ) -> Result<Option<u64>, DBError> { + let query = "SELECT received_time FROM burn_blocks WHERE consensus_hash = ? LIMIT 1"; + let Some(receive_time_i64) = query_row::<i64, _>(&self.db, query, &[ch])? else { + return Ok(None); + }; + let receive_time = u64::try_from(receive_time_i64).map_err(|e| { + error!("Failed to parse db received_time as u64: {e}"); + DBError::Corruption + })?; + Ok(Some(receive_time)) + } + /// Insert or replace a block into the database. /// Preserves the `broadcast` column if replacing an existing block. 
pub fn insert_block(&mut self, block_info: &BlockInfo) -> Result<(), DBError> { @@ -960,7 +1011,7 @@ impl SignerDb { debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, "burn_block_height" => %block_info.burn_block_height, - "sighash" => %hash, + "signer_signature_hash" => %hash, "block_id" => %block_id, "signed" => %signed_over, "broadcasted" => ?broadcasted, @@ -1013,7 +1064,7 @@ impl SignerDb { ]; debug!("Inserting block signature."; - "sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "signature" => %signature); self.db.execute(qry, args)?; @@ -1049,7 +1100,7 @@ impl SignerDb { ]; debug!("Inserting block rejection."; - "block_sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "signer_address" => %addr, "reject_reason" => %reject_reason ); @@ -1494,12 +1545,14 @@ mod tests { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); let test_burn_hash = BurnchainHeaderHash([10; 32]); + let test_consensus_hash = ConsensusHash([13; 20]); let stime = SystemTime::now(); let time_to_epoch = stime .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs(); - db.insert_burn_block(&test_burn_hash, 10, &stime).unwrap(); + db.insert_burn_block(&test_burn_hash, &test_consensus_hash, 10, &stime) + .unwrap(); let stored_time = db .get_burn_block_receive_time(&test_burn_hash) @@ -2193,4 +2246,77 @@ mod tests { .unwrap() .is_none()); } + + /// BlockInfo without the `reject_reason` field for backwards compatibility testing + #[derive(Serialize, Deserialize, Debug, PartialEq)] + pub struct BlockInfoPrev { + /// The block we are considering + pub block: NakamotoBlock, + /// The burn block height at which the block was proposed + pub burn_block_height: u64, + /// The reward cycle the block belongs to + pub reward_cycle: u64, + /// Our vote on the block if we have one yet + pub vote: Option<NakamotoBlockVote>, + /// Whether the block contents are valid + pub valid: Option<bool>, + /// Whether this block is already being signed over + pub signed_over: bool, + /// Time at which the proposal was received by this signer (epoch time in seconds) + pub proposed_time: u64, + /// Time at which the proposal was signed by this signer (epoch time in seconds) + pub signed_self: Option<u64>, + /// Time at which the proposal was signed by a threshold in the signer set (epoch time in seconds) + pub signed_group: Option<u64>, + /// The block state relative to the signer's view of the stacks blockchain + pub state: BlockState, + /// Consumed processing time in milliseconds to validate this block + pub validation_time_ms: Option<u64>, + /// Extra data specific to v0, v1, etc. 
+ pub ext: ExtraBlockInfo, + } + + /// Verify that we can deserialize the old BlockInfo struct into the new version + #[test] + fn deserialize_old_block_info() { + let block_info_prev = BlockInfoPrev { + block: NakamotoBlock { + header: NakamotoBlockHeader::genesis(), + txs: vec![], + }, + burn_block_height: 2, + reward_cycle: 3, + vote: None, + valid: None, + signed_over: true, + proposed_time: 4, + signed_self: None, + signed_group: None, + state: BlockState::Unprocessed, + validation_time_ms: Some(5), + ext: ExtraBlockInfo::default(), + }; + + let block_info: BlockInfo = + serde_json::from_value(serde_json::to_value(&block_info_prev).unwrap()).unwrap(); + assert_eq!(block_info.block, block_info_prev.block); + assert_eq!( + block_info.burn_block_height, + block_info_prev.burn_block_height + ); + assert_eq!(block_info.reward_cycle, block_info_prev.reward_cycle); + assert_eq!(block_info.vote, block_info_prev.vote); + assert_eq!(block_info.valid, block_info_prev.valid); + assert_eq!(block_info.signed_over, block_info_prev.signed_over); + assert_eq!(block_info.proposed_time, block_info_prev.proposed_time); + assert_eq!(block_info.signed_self, block_info_prev.signed_self); + assert_eq!(block_info.signed_group, block_info_prev.signed_group); + assert_eq!(block_info.state, block_info_prev.state); + assert_eq!( + block_info.validation_time_ms, + block_info_prev.validation_time_ms + ); + assert_eq!(block_info.ext, block_info_prev.ext); + assert!(block_info.reject_reason.is_none()); + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 9ffabeed6c5..17450501c61 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -31,7 +31,6 @@ use clarity::types::chainstate::{BurnchainHeaderHash, SortitionId}; use clarity::util::vrf::VRFProof; use libsigner::v0::messages::RejectReason; use libsigner::{BlockProposal, BlockProposalData}; -use slog::slog_info; use stacks_common::bitvec::BitVec; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::info; @@ -251,7 +250,12 @@ fn reorg_timing_testing( let sortition_time = SystemTime::UNIX_EPOCH + Duration::from_secs(block_info_1.proposed_time + sortition_timing_secs); signer_db - .insert_burn_block(&view.cur_sortition.burn_block_hash, 3, &sortition_time) + .insert_burn_block( + &view.cur_sortition.burn_block_hash, + &view.cur_sortition.consensus_hash, + 3, + &sortition_time, + ) .unwrap(); let MockServerClient { @@ -385,10 +389,11 @@ fn check_block_proposal_timeout() { // Ensure we have a burn height to compare against let burn_hash = view.cur_sortition.burn_block_hash; + let consensus_hash = view.cur_sortition.consensus_hash; let burn_height = 1; let received_time = SystemTime::now(); signer_db - .insert_burn_block(&burn_hash, burn_height, &received_time) + .insert_burn_block(&burn_hash, &consensus_hash, burn_height, &received_time) .unwrap(); view.check_proposal( @@ -456,10 +461,11 @@ fn check_sortition_timeout() { }; // Ensure we have a burn height to compare against let burn_hash = sortition.burn_block_hash; + let consensus_hash = sortition.consensus_hash; let burn_height = 1; let received_time = SystemTime::now(); signer_db - .insert_burn_block(&burn_hash, burn_height, &received_time) + .insert_burn_block(&burn_hash, &consensus_hash, burn_height, &received_time) .unwrap(); std::thread::sleep(Duration::from_secs(1)); diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index 34b363311ec..f1484028219 100644 --- a/stacks-signer/src/v0/mod.rs +++ 
b/stacks-signer/src/v0/mod.rs @@ -16,6 +16,8 @@ /// The signer module for processing events pub mod signer; +/// The state machine for the signer view +pub mod signer_state; #[cfg(any(test, feature = "testing"))] /// Test specific functions for the signer module diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index e25d39d41a9..4442df6d676 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,28 +15,37 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; +#[cfg(any(test, feature = "testing"))] +use std::sync::LazyLock; use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ - BlockValidateOk, BlockValidateReject, BlockValidateResponse, TOO_MANY_REQUESTS_STATUS, + BlockValidateOk, BlockValidateReject, BlockValidateResponse, ValidateRejectCode, + TOO_MANY_REQUESTS_STATUS, }; use blockstack_lib::util_lib::db::Error as DBError; use clarity::types::chainstate::StacksPrivateKey; +#[cfg(any(test, feature = "testing"))] +use clarity::types::chainstate::StacksPublicKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use clarity::util::secp256k1::Secp256k1PublicKey; +#[cfg(any(test, feature = "testing"))] +use clarity::util::sleep_ms; +#[cfg(any(test, feature = "testing"))] +use clarity::util::tests::TestFlag; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectReason, RejectReasonPrefix, SignerMessage, }; use libsigner::{BlockProposal, SignerEvent}; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, error, info, warn}; +use super::signer_state::LocalStateMachine; use crate::chainstate::{ProposalEvalConfig, SortitionMinerStatus, SortitionsView}; use crate::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use crate::config::{SignerConfig, SignerConfigMode}; @@ -44,6 +53,12 @@ use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; +/// A global variable that can be used to make signers repeat their proposal +/// response if their public key is in the provided list +#[cfg(any(test, feature = "testing"))] +pub static TEST_REPEAT_PROPOSAL_RESPONSE: LazyLock<TestFlag<Vec<StacksPublicKey>>> = + LazyLock::new(TestFlag::default); + /// Signer running mode (whether dry-run or real) #[derive(Debug)] pub enum SignerMode { @@ -92,6 +107,8 @@ pub struct Signer { pub submitted_block_proposal: Option<(Sha512Trunc256Sum, Instant)>, /// Maximum age of a block proposal in seconds before it is dropped without processing pub block_proposal_max_age_secs: u64, + /// The signer's local state machine used in signer set agreement + pub local_state_machine: LocalStateMachine, } impl std::fmt::Display for SignerMode { @@ -111,8 +128,40 @@ impl std::fmt::Display for Signer { impl SignerTrait<SignerMessage> for Signer { /// Create a new signer from the given configuration - fn new(config: SignerConfig) -> Self { - Self::from(config) + fn new(stacks_client: &StacksClient, signer_config: SignerConfig) -> Self { + let stackerdb = StackerDB::from(&signer_config); + let mode = match signer_config.signer_mode { + SignerConfigMode::DryRun => SignerMode::DryRun, + 
SignerConfigMode::Normal { signer_id, .. } => SignerMode::Normal { signer_id }, + }; + + debug!("Reward cycle #{} {mode}", signer_config.reward_cycle); + + let signer_db = + SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); + let proposal_config = ProposalEvalConfig::from(&signer_config); + + let signer_state = LocalStateMachine::new(&signer_db, stacks_client, &proposal_config) + .unwrap_or_else(|e| { + warn!("Failed to initialize local state machine for signer: {e:?}"); + LocalStateMachine::Uninitialized + }); + Self { + private_key: signer_config.stacks_private_key, + stackerdb, + mainnet: signer_config.mainnet, + mode, + signer_addresses: signer_config.signer_entries.signer_addresses.clone(), + signer_weights: signer_config.signer_entries.signer_addr_to_weight.clone(), + signer_slot_ids: signer_config.signer_slot_ids.clone(), + reward_cycle: signer_config.reward_cycle, + signer_db, + proposal_config, + submitted_block_proposal: None, + block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, + block_proposal_max_age_secs: signer_config.block_proposal_max_age_secs, + local_state_machine: signer_state, + } } /// Return the reward cycle of the signer @@ -126,7 +175,7 @@ impl SignerTrait<SignerMessage> for Signer { stacks_client: &StacksClient, sortition_state: &mut Option<SortitionsView>, event: Option<&SignerEvent<SignerMessage>>, - _res: &Sender<Vec<SignerResult>>, + _res: &Sender<SignerResult>, current_reward_cycle: u64, ) { let event_parity = match event { @@ -162,6 +211,13 @@ impl SignerTrait<SignerMessage> for Signer { debug!("{self}: Signer reward cycle has not yet started. Ignoring event."); return; } + + let prior_state = self.local_state_machine.clone(); + if self.reward_cycle <= current_reward_cycle { + self.local_state_machine.handle_pending_update(&self.signer_db, stacks_client, &self.proposal_config) + .unwrap_or_else(|e| error!("{self}: failed to update local state machine for pending update"; "err" => ?e)); + } + match event { SignerEvent::BlockValidationResponse(block_validate_response) => { debug!("{self}: Received a block proposal result from the stacks node..."); @@ -178,10 +234,17 @@ impl SignerTrait<SignerMessage> for Signer { ); // try and gather signatures for message in messages { - let SignerMessage::BlockResponse(block_response) = message else { - continue; - }; - self.handle_block_response(stacks_client, block_response, sortition_state); + match message { + SignerMessage::BlockResponse(block_response) => self.handle_block_response( + stacks_client, + block_response, + sortition_state, + ), + SignerMessage::StateMachineUpdate(_update) => { + // TODO: should make note of this update view point to determine if there is an agreed upon global state + } + _ => {} + } } } SignerEvent::MinerMessages(messages) => { @@ -199,7 +262,7 @@ impl SignerTrait<SignerMessage> for Signer { let Some(miner_pubkey) = block_proposal.block.header.recover_miner_pk() else { warn!("{self}: Failed to recover miner pubkey"; - "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "signer_signature_hash" => %block_proposal.block.header.signer_signature_hash(), "consensus_hash" => %block_proposal.block.header.consensus_hash); continue; }; @@ -217,7 +280,7 @@ impl SignerTrait<SignerMessage> for Signer { "{self}: Got block pushed message"; "block_id" => %b.block_id(), "block_height" => b.header.chain_length, - "signer_sighash" => %b.header.signer_signature_hash(), + "signer_signature_hash" => %b.header.signer_signature_hash(), ); #[cfg(any(test, feature = "testing"))] if self.test_skip_block_broadcast(b) { @@ -254,11 +317,17 @@ impl SignerTrait<SignerMessage> for 
Signer { SignerEvent::NewBurnBlock { burn_height, burn_header_hash, + consensus_hash, received_time, } => { info!("{self}: Received a new burn block event for block height {burn_height}"); self.signer_db - .insert_burn_block(burn_header_hash, *burn_height, received_time) + .insert_burn_block( + burn_header_hash, + consensus_hash, + *burn_height, + received_time, + ) .unwrap_or_else(|e| { error!( "Failed to write burn block event to signerdb"; @@ -268,20 +337,35 @@ impl SignerTrait<SignerMessage> for Signer { ); panic!("{self} Failed to write burn block event to signerdb: {e}"); }); + self.local_state_machine + .bitcoin_block_arrival(&self.signer_db, stacks_client, &self.proposal_config, Some(*burn_height)) + .unwrap_or_else(|e| error!("{self}: failed to update local state machine for latest bitcoin block arrival"; "err" => ?e)); *sortition_state = None; } SignerEvent::NewBlock { - block_hash, block_height, + block_id, + consensus_hash, + signer_sighash, } => { + let Some(signer_sighash) = signer_sighash else { + debug!("{self}: received a new block event for a pre-nakamoto block, no processing necessary"); + return; + }; debug!( "{self}: Received a new block event."; - "block_hash" => %block_hash, + "block_id" => %block_id, + "signer_signature_hash" => %signer_sighash, + "consensus_hash" => %consensus_hash, "block_height" => block_height ); + self.local_state_machine + .stacks_block_arrival(consensus_hash, *block_height, block_id) + .unwrap_or_else(|e| error!("{self}: failed to update local state machine for latest stacks block arrival"; "err" => ?e)); + if let Ok(Some(mut block_info)) = self .signer_db - .block_lookup(block_hash) + .block_lookup(signer_sighash) .inspect_err(|e| warn!("{self}: Failed to load block state: {e:?}")) { if block_info.state == BlockState::GloballyAccepted { @@ -298,6 +382,10 @@ impl SignerTrait<SignerMessage> for Signer { } } } + if prior_state != self.local_state_machine { + self.local_state_machine + .send_signer_update_message(&mut self.stackerdb); + } } fn has_unprocessed_blocks(&self) -> bool { @@ -309,37 +397,9 @@ impl SignerTrait<SignerMessage> for Signer { true }) } -} -impl From<SignerConfig> for Signer { - fn from(signer_config: SignerConfig) -> Self { - let stackerdb = StackerDB::from(&signer_config); - let mode = match signer_config.signer_mode { - SignerConfigMode::DryRun => SignerMode::DryRun, - SignerConfigMode::Normal { signer_id, .. 
} => SignerMode::Normal { signer_id }, - }; - - debug!("Reward cycle #{} {mode}", signer_config.reward_cycle); - - let signer_db = - SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); - let proposal_config = ProposalEvalConfig::from(&signer_config); - - Self { - private_key: signer_config.stacks_private_key, - stackerdb, - mainnet: signer_config.mainnet, - mode, - signer_addresses: signer_config.signer_entries.signer_addresses.clone(), - signer_weights: signer_config.signer_entries.signer_addr_to_weight.clone(), - signer_slot_ids: signer_config.signer_slot_ids.clone(), - reward_cycle: signer_config.reward_cycle, - signer_db, - proposal_config, - submitted_block_proposal: None, - block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, - block_proposal_max_age_secs: signer_config.block_proposal_max_age_secs, - } + fn get_local_state_machine(&self) -> &LocalStateMachine { + &self.local_state_machine } } @@ -377,6 +437,7 @@ impl Signer { ), ) } + /// Create a block rejection response for a block with the given reject code pub fn create_block_rejection( &self, @@ -397,6 +458,7 @@ impl Signer { ), ) } + /// Check if block should be rejected based on sortition state /// Will return a BlockResponse::Rejection if the block is invalid, none otherwise. fn check_block_against_sortition_state( @@ -415,7 +477,7 @@ impl Signer { .inspect_err(|e| { warn!( "{self}: Failed to update sortition view: {e:?}"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_id, ) }) @@ -435,7 +497,7 @@ impl Signer { Err(RejectReason::ConnectivityIssues(e)) => { warn!( "{self}: Error checking block proposal: {e}"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_id, ); Some(self.create_block_rejection(RejectReason::ConnectivityIssues(e), block)) @@ -444,7 +506,7 @@ impl Signer { Err(reject_code) => { warn!( "{self}: Block proposal invalid"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_id, "reject_reason" => %reject_code, "reject_code" => ?reject_code, @@ -457,13 +519,52 @@ impl Signer { } else { warn!( "{self}: Cannot validate block, no sortition view"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_id, ); Some(self.create_block_rejection(RejectReason::NoSortitionView, block)) } } + /// The actual `send_block_response` implementation. Declared so that we do + /// not need to duplicate in testing. 
+ fn impl_send_block_response(&mut self, block_response: BlockResponse) { + let res = self + .stackerdb + .send_message_with_retry::<SignerMessage>(block_response.clone().into()); + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted => warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + } + + #[cfg(any(test, feature = "testing"))] + fn send_block_response(&mut self, block_response: BlockResponse) { + const NUM_REPEATS: usize = 1; + let mut count = 0; + let public_keys = TEST_REPEAT_PROPOSAL_RESPONSE.get(); + if !public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + count = NUM_REPEATS; + } + while count <= NUM_REPEATS { + self.impl_send_block_response(block_response.clone()); + + count += 1; + sleep_ms(1000); + } + } + + #[cfg(not(any(test, feature = "testing")))] + fn send_block_response(&mut self, block_response: BlockResponse) { + self.impl_send_block_response(block_response) + } + /// Handle block proposal messages submitted to signers stackerdb fn handle_block_proposal( &mut self, @@ -491,7 +592,7 @@ impl Signer { { // Block is too old. Drop it with a warning. Don't even bother broadcasting to the node. warn!("{self}: Received a block proposal that is more than {} secs old. Ignoring...", self.block_proposal_max_age_secs; - "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "signer_signature_hash" => %block_proposal.block.header.signer_signature_hash(), "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, @@ -503,38 +604,24 @@ impl Signer { // TODO: should add a check to ignore an old burn block height if we know its outdated. Would require us to store the burn block height we last saw on the side. // the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - if let Some(block_info) = self.block_lookup_by_reward_cycle(&signer_signature_hash) { - let Some(block_response) = self.determine_response(&block_info) else { - // We are still waiting for a response for this block. Do nothing. 
- debug!("{self}: Received a block proposal for a block we are already validating."; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id() - ); - return; - }; - // Submit a proposal response to the .signers contract for miners - debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - let accepted = matches!(block_response, BlockResponse::Accepted(..)); - match self - .stackerdb - .send_message_with_retry::(block_response.into()) - { - Ok(_) => { - crate::monitoring::actions::increment_block_responses_sent(accepted); - crate::monitoring::actions::record_block_response_latency( - &block_proposal.block, - ); - } - Err(e) => { - warn!("{self}: Failed to send block response to stacker-db: {e:?}",); - } - } - return; + let prior_evaluation = self + .block_lookup_by_reward_cycle(&signer_signature_hash) + .and_then(|block_info| if should_reevaluate_block(&block_info) { + debug!("Received a proposal for this block before, but our rejection reason allows us to reconsider"; + "reject_reason" => ?block_info.reject_reason); + None + } else { + Some(block_info) + }); + + // we previously considered this proposal, handle the status here + if let Some(block_info) = prior_evaluation { + return self.handle_prior_proposal_eval(&block_info); } info!( "{self}: received a block proposal for a new block."; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, @@ -553,7 +640,7 @@ impl Signer { .inspect_err(|e| { warn!( "{self}: Failed to update sortition view: {e:?}"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), ) }) @@ -575,18 +662,7 @@ impl Signer { if let Some(block_response) = block_response { // We know proposal is invalid. Send rejection message, do not do further validation and do not store it. debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - let res = self - .stackerdb - .send_message_with_retry::(block_response.into()); - - match res { - Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), - Ok(ack) if !ack.accepted => warn!( - "{self}: Block rejection not accepted by stacker-db: {:?}", - ack.reason - ), - Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), - } + self.send_block_response(block_response); } else { // Just in case check if the last block validation submission timed out. self.check_submitted_block_proposal(); @@ -594,7 +670,7 @@ impl Signer { // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. info!( "{self}: submitting block proposal for validation"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, @@ -625,6 +701,32 @@ impl Signer { } } + fn handle_prior_proposal_eval(&mut self, block_info: &BlockInfo) { + let Some(block_response) = self.determine_response(block_info) else { + // We are still waiting for a response for this block. Do nothing. 
+        debug!(
+            "{self}: Received a block proposal for a block we are already validating.";
+            "signer_signature_hash" => %block_info.signer_signature_hash(),
+            "block_id" => %block_info.block.block_id()
+        );
+        return;
+        };
+
+        // Submit a proposal response to the .signers contract for miners
+        debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}");
+
+        let accepted = matches!(block_response, BlockResponse::Accepted(..));
+        if let Err(e) = self
+            .stackerdb
+            .send_message_with_retry::<SignerMessage>(block_response.into())
+        {
+            warn!("{self}: Failed to send block response to stacker-db: {e:?}");
+        } else {
+            crate::monitoring::actions::increment_block_responses_sent(accepted);
+            crate::monitoring::actions::record_block_response_latency(&block_info.block);
+        }
+    }
+
     /// Handle block response messages from a signer
     fn handle_block_response(
         &mut self,
@@ -673,7 +775,7 @@ impl Signer {
             }
             Err(e) => {
                 warn!("{self}: Error checking block proposal: {e}";
-                    "signer_sighash" => %signer_signature_hash,
+                    "signer_signature_hash" => %signer_signature_hash,
                    "block_id" => %proposed_block.block_id()
                );
                return Some(self.create_block_rejection(
@@ -696,7 +798,7 @@ impl Signer {
            warn!(
                "Miner's block proposal does not confirm as many blocks as we expect";
                "proposed_block_consensus_hash" => %proposed_block_consensus_hash,
-                "proposed_block_signer_sighash" => %signer_signature_hash,
+                "proposed_block_signer_signature_hash" => %signer_signature_hash,
                "proposed_chain_length" => proposed_block.header.chain_length,
                "expected_at_least" => last_block_info.block.header.chain_length + 1,
            );
@@ -709,7 +811,7 @@ impl Signer {
            Ok(_) => {}
            Err(e) => {
                warn!("{self}: Failed to check block against signer db: {e}";
-                    "signer_sighash" => %signer_signature_hash,
+                    "signer_signature_hash" => %signer_signature_hash,
                    "block_id" => %proposed_block.block_id()
                );
                return Some(self.create_block_rejection(
@@ -844,6 +946,8 @@ impl Signer {
                false,
            ),
        );
+
+        block_info.reject_reason = Some(block_rejection.response_data.reject_reason.clone());
        self.signer_db
            .insert_block(&block_info)
            .unwrap_or_else(|e| self.handle_insert_block_error(e));
@@ -952,7 +1056,7 @@ impl Signer {
            // This is weird. If this is reached, it's probably an error in code logic or the db was flushed.
            // Why are we tracking a block submission for a block we have never seen / stored before.
            error!("{self}: tracking an unknown block validation submission.";
-                "signer_sighash" => %proposal_signer_sighash,
+                "signer_signature_hash" => %proposal_signer_sighash,
            );
            return;
        }
@@ -965,7 +1069,7 @@ impl Signer {
            // Reject it so we aren't holding up the network because of our inaction.
            warn!(
                "{self}: Failed to receive block validation response within {} ms. Rejecting block.", self.block_proposal_validation_timeout.as_millis();
-                "signer_sighash" => %proposal_signer_sighash,
+                "signer_signature_hash" => %proposal_signer_sighash,
            );
            let rejection = self.create_block_rejection(
                RejectReason::ConnectivityIssues(
@@ -973,6 +1077,7 @@ impl Signer {
                ),
                &block_info.block,
            );
+            block_info.reject_reason = Some(rejection.get_response_data().reject_reason.clone());
            if let Err(e) = block_info.mark_locally_rejected() {
                if !block_info.has_reached_consensus() {
                    warn!("{self}: Failed to mark block as locally rejected: {e:?}");
@@ -993,6 +1098,7 @@ impl Signer {
            ),
            Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"),
        }
+
        self.signer_db
            .insert_block(&block_info)
            .unwrap_or_else(|e| self.handle_insert_block_error(e));
@@ -1056,7 +1162,7 @@ impl Signer {
        // recover public key
        let Ok(public_key) = rejection.recover_public_key() else {
            debug!("{self}: Received block rejection with an unrecoverable signature. Will not store.";
-                "block_hash" => %block_hash,
+                "signer_signature_hash" => %block_hash,
                "signature" => %signature
            );
            return;
@@ -1072,7 +1178,7 @@ impl Signer {

        if !is_valid_sig {
            debug!("{self}: Received block rejection with an invalid signature. Will not store.";
-                "block_hash" => %block_hash,
+                "signer_signature_hash" => %block_hash,
                "signature" => %signature
            );
            return;
@@ -1086,6 +1192,7 @@ impl Signer {
        ) {
            warn!("{self}: Failed to save block rejection signature: {e:?}",);
        }
+        block_info.reject_reason = Some(rejection.response_data.reject_reason.clone());

        // do we have enough signatures to mark a block a globally rejected?
        // i.e. is (set-size) - (threshold) + 1 reached.
@@ -1173,7 +1280,7 @@ impl Signer {
        else {
            debug!("{self}: Received unrecoverable signature. Will not store.";
                "signature" => %signature,
-                "block_hash" => %block_hash);
+                "signer_signature_hash" => %block_hash);
            return;
        };
@@ -1366,3 +1473,33 @@ impl Signer {
        }
    }
}
+
+/// Determine if a block should be re-evaluated based on its rejection reason
+fn should_reevaluate_block(block_info: &BlockInfo) -> bool {
+    if let Some(reject_reason) = &block_info.reject_reason {
+        match reject_reason {
+            RejectReason::ValidationFailed(ValidateRejectCode::UnknownParent)
+            | RejectReason::NoSortitionView
+            | RejectReason::ConnectivityIssues(_)
+            | RejectReason::TestingDirective
+            | RejectReason::InvalidTenureExtend
+            | RejectReason::NotRejected
+            | RejectReason::Unknown(_) => true,
+            RejectReason::ValidationFailed(_)
+            | RejectReason::RejectedInPriorRound
+            | RejectReason::SortitionViewMismatch
+            | RejectReason::ReorgNotAllowed
+            | RejectReason::InvalidBitvec
+            | RejectReason::PubkeyHashMismatch
+            | RejectReason::InvalidMiner
+            | RejectReason::NotLatestSortitionWinner
+            | RejectReason::InvalidParentBlock
+            | RejectReason::DuplicateBlockFound => {
+                // No need to re-validate these types of rejections.
+                false
+            }
+        }
+    } else {
+        false
+    }
+}
diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs
new file mode 100644
index 00000000000..3339f1645b1
--- /dev/null
+++ b/stacks-signer/src/v0/signer_state.rs
@@ -0,0 +1,531 @@
+// Copyright (C) 2025 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
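// Illustrative sketch (not part of this diff): the partition that
// should_reevaluate_block (above) encodes, reduced to a self-contained form.
// Transient failures (connectivity problems, a missing sortition view, an
// unknown parent, testing directives) can clear up by the time the same block
// is proposed again, so they are worth re-evaluating; consensus-level
// rejections are final. The trimmed `Rejection` enum is a stand-in for the
// real RejectReason.
#[derive(Clone, Copy)]
enum Rejection {
    Transient, // e.g. ConnectivityIssues, NoSortitionView, UnknownParent
    Terminal,  // e.g. SortitionViewMismatch, DuplicateBlockFound
}

fn reevaluate(prior: Option<Rejection>) -> bool {
    match prior {
        Some(Rejection::Transient) => true,
        // No recorded rejection means the prior evaluation stands as-is.
        Some(Rejection::Terminal) | None => false,
    }
}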
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::time::{Duration, UNIX_EPOCH};
+
+use blockstack_lib::chainstate::burn::ConsensusHashExtensions;
+use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader};
+use libsigner::v0::messages::{
+    MessageSlotID, SignerMessage, StateMachineUpdate as StateMachineUpdateMessage,
+    StateMachineUpdateContent, StateMachineUpdateMinerState,
+};
+use serde::{Deserialize, Serialize};
+use stacks_common::bitvec::BitVec;
+use stacks_common::codec::Error as CodecError;
+use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash};
+use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum};
+use stacks_common::util::secp256k1::MessageSignature;
+use stacks_common::{info, warn};
+
+use crate::chainstate::{
+    ProposalEvalConfig, SignerChainstateError, SortitionState, SortitionsView,
+};
+use crate::client::{ClientError, CurrentAndLastSortition, StackerDB, StacksClient};
+use crate::signerdb::SignerDb;
+
+/// This is the latest supported protocol version for this signer binary
+pub static SUPPORTED_SIGNER_PROTOCOL_VERSION: u64 = 0;
+
+/// A signer state machine view. This struct can
+/// be used to encode the local signer's view or
+/// the global view.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct SignerStateMachine {
+    /// The tip burn block (i.e., the latest bitcoin block) seen by this signer
+    pub burn_block: ConsensusHash,
+    /// The tip burn block height (i.e., the latest bitcoin block) seen by this signer
+    pub burn_block_height: u64,
+    /// The signer's view of who the current miner should be (and their tenure building info)
+    pub current_miner: MinerState,
+    /// The active signing protocol version
+    pub active_signer_protocol_version: u64,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+/// Enum for capturing the signer state machine's view of who
+/// should be the active miner and what their tenure should be
+/// built on top of.
+pub enum MinerState {
+    /// The information for the current active miner
+    ActiveMiner {
+        /// The pubkeyhash of the current miner's signing key
+        current_miner_pkh: Hash160,
+        /// The tenure ID of the current miner's active tenure
+        tenure_id: ConsensusHash,
+        /// The tenure that the current miner is building on top of
+        parent_tenure_id: ConsensusHash,
+        /// The last block of the parent tenure (which should be
+        /// the block that the next tenure starts from)
+        parent_tenure_last_block: StacksBlockId,
+        /// The height of the last block of the parent tenure (which should be
+        /// the block that the next tenure starts from)
+        parent_tenure_last_block_height: u64,
+    },
+    /// This signer doesn't believe there's any valid miner
+    NoValidMiner,
+}
+
+/// The local signer state machine
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub enum LocalStateMachine {
+    /// The local state machine couldn't be instantiated
+    Uninitialized,
+    /// The local state machine is instantiated
+    Initialized(SignerStateMachine),
+    /// The local state machine has a pending update
+    Pending {
+        /// The pending update
+        update: StateMachineUpdate,
+        /// The local state machine before the pending update
+        prior: SignerStateMachine,
+    },
+}
+
+/// A pending update for a signer state machine
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub enum StateMachineUpdate {
+    /// A new burn block at height u64 is expected
+    BurnBlock(u64),
+}
+
+impl TryInto<StateMachineUpdateMessage> for &LocalStateMachine {
+    type Error = CodecError;
+
+    fn try_into(self) -> Result<StateMachineUpdateMessage, Self::Error> {
+        let LocalStateMachine::Initialized(state_machine) = self else {
+            return Err(CodecError::SerializeError(
+                "Local state machine is not ready to be serialized into an update message".into(),
+            ));
+        };
+
+        let current_miner = match state_machine.current_miner {
+            MinerState::ActiveMiner {
+                current_miner_pkh,
+                tenure_id,
+                parent_tenure_id,
+                parent_tenure_last_block,
+                parent_tenure_last_block_height,
+            } => StateMachineUpdateMinerState::ActiveMiner {
+                current_miner_pkh,
+                tenure_id,
+                parent_tenure_id,
+                parent_tenure_last_block,
+                parent_tenure_last_block_height,
+            },
+            MinerState::NoValidMiner => StateMachineUpdateMinerState::NoValidMiner,
+        };
+
+        StateMachineUpdateMessage::new(
+            state_machine.active_signer_protocol_version,
+            SUPPORTED_SIGNER_PROTOCOL_VERSION,
+            StateMachineUpdateContent::V0 {
+                burn_block: state_machine.burn_block,
+                burn_block_height: state_machine.burn_block_height,
+                current_miner,
+            },
+        )
+    }
+}
+
+impl LocalStateMachine {
+    /// Initialize a local state machine by querying the local stacks-node
+    /// and signerdb for the current sortition information
+    pub fn new(
+        db: &SignerDb,
+        client: &StacksClient,
+        proposal_config: &ProposalEvalConfig,
+    ) -> Result<Self, SignerChainstateError> {
+        let mut instance = Self::Uninitialized;
+        instance.bitcoin_block_arrival(db, client, proposal_config, None)?;
+
+        Ok(instance)
+    }
+
+    fn place_holder() -> SignerStateMachine {
+        SignerStateMachine {
+            burn_block: ConsensusHash::empty(),
+            burn_block_height: 0,
+            current_miner: MinerState::NoValidMiner,
+            active_signer_protocol_version: SUPPORTED_SIGNER_PROTOCOL_VERSION,
+        }
+    }
+
+    /// Send the local state machine as a signer update message to stackerdb
+    pub fn send_signer_update_message(&self, stackerdb: &mut StackerDB) {
+        let update: Result<StateMachineUpdateMessage, _> = self.try_into();
+        match update {
+            Ok(update) => {
+                if let Err(e) = stackerdb.send_message_with_retry::<SignerMessage>(update.into()) {
+                    warn!("Failed to send signer update to stacker-db: {e:?}",);
+                }
+            }
+            Err(e) => {
+                warn!("Failed to convert local signer state to a signer message: {e:?}");
+            }
+        }
+    }
+
+    /// If this local state machine has pending updates, process them
+    pub fn handle_pending_update(
+        &mut self,
+        db: &SignerDb,
+        client: &StacksClient,
+        proposal_config: &ProposalEvalConfig,
+    ) -> Result<(), SignerChainstateError> {
+        let LocalStateMachine::Pending { update, .. } = self else {
+            return self.check_miner_inactivity(db, client, proposal_config);
+        };
+        match update.clone() {
+            StateMachineUpdate::BurnBlock(expected_burn_height) => {
+                self.bitcoin_block_arrival(db, client, proposal_config, Some(expected_burn_height))
+            }
+        }
+    }
+
+    fn is_timed_out(
+        sortition: &ConsensusHash,
+        db: &SignerDb,
+        proposal_config: &ProposalEvalConfig,
+    ) -> Result<bool, SignerChainstateError> {
+        // if we've already signed a block in this tenure, the miner can't have timed out.
+        let has_block = db.has_signed_block_in_tenure(sortition)?;
+        if has_block {
+            return Ok(false);
+        }
+        let Some(received_ts) = db.get_burn_block_receive_time_ch(sortition)? else {
+            return Ok(false);
+        };
+        let received_time = UNIX_EPOCH + Duration::from_secs(received_ts);
+        let last_activity = db
+            .get_last_activity_time(sortition)?
+            .map(|time| UNIX_EPOCH + Duration::from_secs(time))
+            .unwrap_or(received_time);
+
+        let Ok(elapsed) = std::time::SystemTime::now().duration_since(last_activity) else {
+            return Ok(false);
+        };
+
+        if elapsed > proposal_config.block_proposal_timeout {
+            info!(
+                "Tenure miner was inactive too long and timed out";
+                "tenure_ch" => %sortition,
+                "elapsed_inactive" => elapsed.as_secs(),
+                "config_block_proposal_timeout" => proposal_config.block_proposal_timeout.as_secs()
+            );
+        }
+        Ok(elapsed > proposal_config.block_proposal_timeout)
+    }
+
+    fn check_miner_inactivity(
+        &mut self,
+        db: &SignerDb,
+        client: &StacksClient,
+        proposal_config: &ProposalEvalConfig,
+    ) -> Result<(), SignerChainstateError> {
+        let Self::Initialized(ref mut state_machine) = self else {
+            // no inactivity if the state machine isn't initialized
+            return Ok(());
+        };
+
+        let MinerState::ActiveMiner { ref tenure_id, .. } = state_machine.current_miner else {
+            // no inactivity if there's no active miner
+            return Ok(());
+        };
+
+        if !Self::is_timed_out(tenure_id, db, proposal_config)? {
+            return Ok(());
+        }
+
+        // the tenure timed out, try to see if we can use the prior tenure instead
+        let CurrentAndLastSortition { last_sortition, .. } =
+            client.get_current_and_last_sortition()?;
+        let last_sortition = last_sortition
+            .map(SortitionState::try_from)
+            .transpose()
+            .ok()
+            .flatten();
+        let Some(last_sortition) = last_sortition else {
+            warn!("Current miner timed out due to inactivity, but could not find a valid prior miner. Allowing current miner to continue");
+            return Ok(());
+        };
+
+        if Self::is_tenure_valid(&last_sortition, db, client, proposal_config)? {
+            let new_active_tenure_ch = last_sortition.consensus_hash;
+            let inactive_tenure_ch = *tenure_id;
+            state_machine.current_miner =
+                Self::make_miner_state(last_sortition, client, db, proposal_config)?;
+            info!(
+                "Current tenure timed out, setting the active miner to the prior tenure";
+                "inactive_tenure_ch" => %inactive_tenure_ch,
+                "new_active_tenure_ch" => %new_active_tenure_ch
+            );
+            Ok(())
+        } else {
+            warn!("Current miner timed out due to inactivity, but prior miner is not valid. Allowing current miner to continue");
+            Ok(())
+        }
+    }
+
+    fn make_miner_state(
+        sortition_to_set: SortitionState,
+        client: &StacksClient,
+        db: &SignerDb,
+        proposal_config: &ProposalEvalConfig,
+    ) -> Result<MinerState, SignerChainstateError> {
+        let next_current_miner_pkh = sortition_to_set.miner_pkh;
+        let next_parent_tenure_id = sortition_to_set.parent_tenure_id;
+
+        let stacks_node_last_block = client
+            .get_tenure_tip(&next_parent_tenure_id)
+            .inspect_err(|e| {
+                warn!(
+                    "Failed to fetch last block in parent tenure from stacks-node";
+                    "parent_tenure_id" => %sortition_to_set.parent_tenure_id,
+                    "err" => ?e,
+                )
+            })
+            .ok()
+            .map(|header| {
+                (
+                    header.height(),
+                    StacksBlockId::new(&next_parent_tenure_id, &header.block_hash()),
+                )
+            });
+        let signerdb_last_block = SortitionsView::get_tenure_last_block_info(
+            &next_parent_tenure_id,
+            db,
+            proposal_config.tenure_last_block_proposal_timeout,
+        )?
+        .map(|info| (info.block.header.chain_length, info.block.block_id()));
+
+        let (parent_tenure_last_block_height, parent_tenure_last_block) =
+            match (stacks_node_last_block, signerdb_last_block) {
+                (Some(stacks_node_info), Some(signerdb_info)) => {
+                    std::cmp::max_by_key(stacks_node_info, signerdb_info, |info| info.0)
+                }
+                (None, Some(signerdb_info)) => signerdb_info,
+                (Some(stacks_node_info), None) => stacks_node_info,
+                (None, None) => {
+                    return Err(SignerChainstateError::NoParentTenureInfo(
+                        next_parent_tenure_id,
+                    ))
+                }
+            };
+
+        let miner_state = MinerState::ActiveMiner {
+            current_miner_pkh: next_current_miner_pkh,
+            tenure_id: sortition_to_set.consensus_hash,
+            parent_tenure_id: next_parent_tenure_id,
+            parent_tenure_last_block,
+            parent_tenure_last_block_height,
+        };
+
+        Ok(miner_state)
+    }
+
+    /// Handle a new stacks block arrival
+    pub fn stacks_block_arrival(
+        &mut self,
+        ch: &ConsensusHash,
+        height: u64,
+        block_id: &StacksBlockId,
+    ) -> Result<(), SignerChainstateError> {
+        // set self to uninitialized so that if this function errors,
+        // self is left as uninitialized.
+        let prior_state = std::mem::replace(self, Self::Uninitialized);
+        let mut prior_state_machine = match prior_state {
+            // if the local state machine was uninitialized, just initialize it
+            LocalStateMachine::Initialized(signer_state_machine) => signer_state_machine,
+            LocalStateMachine::Uninitialized => {
+                // we don't need to update any state when we're uninitialized for new stacks block
+                // arrivals
+                return Ok(());
+            }
+            LocalStateMachine::Pending { update, prior } => {
+                // This works as long as the pending updates are only burn blocks,
+                // but if we have other kinds of pending updates, this logic will need
+                // to be changed.
+                match &update {
+                    StateMachineUpdate::BurnBlock(..) => {
+                        *self = LocalStateMachine::Pending { update, prior };
+                        return Ok(());
+                    }
+                }
+            }
+        };
+
+        let MinerState::ActiveMiner {
+            parent_tenure_id,
+            parent_tenure_last_block,
+            parent_tenure_last_block_height,
+            ..
+        } = &mut prior_state_machine.current_miner
+        else {
+            // if there's no valid miner, then we don't need to update any state for new stacks blocks
+            *self = LocalStateMachine::Initialized(prior_state_machine);
+            return Ok(());
+        };
+
+        if parent_tenure_id != ch {
+            // if the new block isn't from the parent tenure, we don't need any updates
+            *self = LocalStateMachine::Initialized(prior_state_machine);
+            return Ok(());
+        }
+
+        if height <= *parent_tenure_last_block_height {
+            // if the new block isn't higher than we already expected, we don't need any updates
+            *self = LocalStateMachine::Initialized(prior_state_machine);
+            return Ok(());
+        }
+
+        *parent_tenure_last_block = *block_id;
+        *parent_tenure_last_block_height = height;
+        *self = LocalStateMachine::Initialized(prior_state_machine);
+        Ok(())
+    }
+
+    /// check if the tenure defined by sortition state:
+    ///  (1) chose an appropriate parent tenure
+    ///  (2) has not "timed out"
+    fn is_tenure_valid(
+        sortition_state: &SortitionState,
+        signer_db: &SignerDb,
+        client: &StacksClient,
+        proposal_config: &ProposalEvalConfig,
+    ) -> Result<bool, SignerChainstateError> {
+        let standin_block = NakamotoBlock {
+            header: NakamotoBlockHeader {
+                version: 0,
+                chain_length: 0,
+                burn_spent: 0,
+                consensus_hash: sortition_state.consensus_hash,
+                parent_block_id: StacksBlockId::first_mined(),
+                tx_merkle_root: Sha512Trunc256Sum([0; 32]),
+                state_index_root: TrieHash([0; 32]),
+                timestamp: 0,
+                miner_signature: MessageSignature::empty(),
+                signer_signature: vec![],
+                pox_treatment: BitVec::ones(1).unwrap(),
+            },
+            txs: vec![],
+        };
+
+        let chose_good_parent = SortitionsView::check_parent_tenure_choice(
+            sortition_state,
+            &standin_block,
+            signer_db,
+            client,
+            &proposal_config.first_proposal_burn_block_timing,
+        )?;
+        if !chose_good_parent {
+            return Ok(false);
+        }
+        Self::is_timed_out(&sortition_state.consensus_hash, signer_db, proposal_config)
+            .map(|timed_out| !timed_out)
+    }
+
+    /// Handle a new bitcoin block arrival
+    pub fn bitcoin_block_arrival(
+        &mut self,
+        db: &SignerDb,
+        client: &StacksClient,
+        proposal_config: &ProposalEvalConfig,
+        mut expected_burn_height: Option<u64>,
+    ) -> Result<(), SignerChainstateError> {
+        // set self to uninitialized so that if this function errors,
+        // self is left as uninitialized.
+        let prior_state = std::mem::replace(self, Self::Uninitialized);
+        let prior_state_machine = match prior_state {
+            // if the local state machine was uninitialized, just initialize it
+            LocalStateMachine::Uninitialized => Self::place_holder(),
+            LocalStateMachine::Initialized(signer_state_machine) => signer_state_machine,
+            LocalStateMachine::Pending { update, prior } => {
+                // This works as long as the pending updates are only burn blocks,
+                // but if we have other kinds of pending updates, this logic will need
+                // to be changed.
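// Illustrative sketch (not part of this diff): how a pending burn-block
// expectation is merged with a newly requested one in the match that follows.
// The state machine only ever waits for the highest burn height it has been
// told about, so the merge is effectively a max().
fn merge_expected_height(pending: u64, requested: Option<u64>) -> Option<u64> {
    Some(pending.max(requested.unwrap_or(0)))
}
// e.g. merge_expected_height(101, Some(100)) == Some(101), mirroring the
// "pending_burn_height > expected_burn_height" arm below.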
+ match update { + StateMachineUpdate::BurnBlock(pending_burn_height) => { + if pending_burn_height > expected_burn_height.unwrap_or(0) { + expected_burn_height = Some(pending_burn_height); + } + } + } + + prior + } + }; + + let peer_info = client.get_peer_info()?; + let next_burn_block_height = peer_info.burn_block_height; + let next_burn_block_hash = peer_info.pox_consensus; + + if let Some(expected_burn_height) = expected_burn_height { + if next_burn_block_height < expected_burn_height { + *self = Self::Pending { + update: StateMachineUpdate::BurnBlock(expected_burn_height), + prior: prior_state_machine, + }; + return Err(ClientError::InvalidResponse( + "Node has not processed the next burn block yet".into(), + ) + .into()); + } + } + + let CurrentAndLastSortition { + current_sortition, + last_sortition, + } = client.get_current_and_last_sortition()?; + + let cur_sortition = SortitionState::try_from(current_sortition)?; + let last_sortition = last_sortition + .map(SortitionState::try_from) + .transpose() + .ok() + .flatten() + .ok_or_else(|| { + ClientError::InvalidResponse( + "Fetching latest and last sortitions failed to return both sortitions".into(), + ) + })?; + + let is_current_valid = Self::is_tenure_valid(&cur_sortition, db, client, proposal_config)?; + + let miner_state = if is_current_valid { + Self::make_miner_state(cur_sortition, client, db, proposal_config)? + } else { + let is_last_valid = + Self::is_tenure_valid(&last_sortition, db, client, proposal_config)?; + + if is_last_valid { + Self::make_miner_state(last_sortition, client, db, proposal_config)? + } else { + warn!("Neither the current nor the prior sortition winner is considered a valid tenure"); + MinerState::NoValidMiner + } + }; + + // Note: we do this at the end so that the transform isn't fallible. + // we should come up with a better scheme here. + *self = Self::Initialized(SignerStateMachine { + burn_block: next_burn_block_hash, + burn_block_height: next_burn_block_height, + current_miner: miner_state, + active_signer_protocol_version: prior_state_machine.active_signer_protocol_version, + }); + + Ok(()) + } +} diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs index 06ccf7ee22a..6fb7ffa9fe5 100644 --- a/stacks-signer/src/v0/tests.rs +++ b/stacks-signer/src/v0/tests.rs @@ -18,7 +18,6 @@ use std::sync::LazyLock; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use libsigner::v0::messages::{BlockResponse, RejectReason}; use libsigner::BlockProposal; -use slog::{slog_info, slog_warn}; use stacks_common::types::chainstate::StacksPublicKey; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::tests::TestFlag; @@ -92,6 +91,9 @@ impl Signer { warn!("{self}: Failed to mark block as locally rejected: {e:?}"); } }; + + block_info.reject_reason = Some(RejectReason::TestingDirective); + // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject // as invalid since we rejected in a prior round if this crops up again) // in case this is the first time we saw this block. Safe to do since this is testing case only. 
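// Illustrative sketch (not part of this diff): the fallback order that
// bitcoin_block_arrival (above) applies when deciding the current miner. The
// generic `S` and the infallible `is_valid` closure are stand-ins for the
// SortitionState type and the fallible is_tenure_valid check in this file.
fn choose_miner<S>(current: S, last: S, is_valid: impl Fn(&S) -> bool) -> Option<S> {
    if is_valid(&current) {
        Some(current) // the current sortition winner has a valid tenure
    } else if is_valid(&last) {
        Some(last) // otherwise fall back to the prior sortition winner
    } else {
        None // NoValidMiner: neither tenure is acceptable
    }
}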
diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index cf0ae6c1f86..0145f0382d3 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -83,10 +83,6 @@ features = ["serde", "recovery"] [dependencies.ed25519-dalek] workspace = true -[dependencies.curve25519-dalek] -version = "=2.0.0" -features = ["serde"] - [dependencies.time] version = "0.2.23" features = ["std"] diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 9cfee0a7dee..f587ed3e43d 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -241,7 +241,7 @@ impl TestMiner { ); match self.vrf_key_map.get(vrf_pubkey) { Some(prover_key) => { - let proof = VRF::prove(prover_key, last_sortition_hash.as_bytes()); + let proof = VRF::prove(prover_key, last_sortition_hash.as_bytes())?; let valid = match VRF::verify(vrf_pubkey, &proof, last_sortition_hash.as_bytes()) { Ok(v) => v, Err(e) => false, diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 29ed43e0451..a8eb83eb250 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -5645,7 +5645,8 @@ impl SortitionHandleTx<'_> { "ACCEPTED({}) leader block commit {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex; "apparent_sender" => %op.apparent_sender, - "stacks_block_hash" => %op.block_header_hash + "stacks_block_hash" => %op.block_header_hash, + "parent_burn_block" => %op.parent_block_ptr ); self.insert_block_commit(op, sort_id) } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index c7bb456f44d..82aadee9a51 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -392,6 +392,7 @@ pub fn setup_states_with_epochs( Value::UInt(burnchain.pox_constants.pox_rejection_fraction as u128), ], |_, _| false, + None, ) .expect("Failed to set burnchain parameters in PoX contract"); }); @@ -646,7 +647,7 @@ fn make_genesis_block_with_recipients( let parent_stacks_header = StacksHeaderInfo::regtest_genesis(); - let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()); + let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()).unwrap(); let mut builder = StacksBlockBuilder::make_regtest_block_builder( burnchain, @@ -666,7 +667,7 @@ fn make_genesis_block_with_recipients( .0; builder - .try_mine_tx(&mut epoch_tx, &coinbase_op, ast_rules) + .try_mine_tx(&mut epoch_tx, &coinbase_op, ast_rules, None) .unwrap(); let block = builder.mine_anchored_block(&mut epoch_tx); @@ -909,7 +910,7 @@ fn make_stacks_block_with_input( eprintln!("Build off of {:?}", &parent_stacks_header); - let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()); + let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()).unwrap(); let total_burn = parents_sortition.total_burn; @@ -931,11 +932,13 @@ fn make_stacks_block_with_input( .0; builder - .try_mine_tx(&mut epoch_tx, &coinbase_op, ast_rules) + .try_mine_tx(&mut epoch_tx, &coinbase_op, ast_rules, None) .unwrap(); for tx in txs { - builder.try_mine_tx(&mut epoch_tx, tx, ast_rules).unwrap(); + builder + .try_mine_tx(&mut epoch_tx, tx, ast_rules, None) + .unwrap(); } let block = builder.mine_anchored_block(&mut epoch_tx); diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index d9ad1319f71..77e74fcce61 100644 --- 
a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -691,6 +691,7 @@ impl BlockBuilder for NakamotoBlockBuilder { tx_len: u64, limit_behavior: &BlockLimitFunction, ast_rules: ASTRules, + max_execution_time: Option, ) -> TransactionResult { if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { return TransactionResult::skipped_due_to_error(tx, Error::BlockTooBigError); @@ -737,13 +738,20 @@ impl BlockBuilder for NakamotoBlockBuilder { } let cost_before = clarity_tx.cost_so_far(); - let (fee, receipt) = - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ast_rules) { - Ok(x) => x, - Err(e) => { - return parse_process_transaction_error(clarity_tx, tx, e); - } - }; + + let (_fee, receipt) = match StacksChainState::process_transaction( + clarity_tx, + tx, + quiet, + ast_rules, + max_execution_time, + ) { + Ok(x) => x, + Err(e) => { + return parse_process_transaction_error(clarity_tx, tx, e); + } + }; + let cost_after = clarity_tx.cost_so_far(); let mut soft_limit_reached = false; // We only attempt to apply the soft limit to non-boot code contract calls. @@ -764,7 +772,7 @@ impl BlockBuilder for NakamotoBlockBuilder { // save self.txs.push(tx.clone()); - TransactionResult::success_with_soft_limit(tx, fee, receipt, soft_limit_reached) + TransactionResult::success_with_soft_limit(tx, receipt, soft_limit_reached) }; self.bytes_so_far += tx_len; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index c5df44c6189..6eeebc54dbb 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4896,6 +4896,7 @@ impl NakamotoChainState { &contract_content, None, |_, _| false, + None, ) .unwrap(); clarity.save_analysis(&contract_id, &analysis).unwrap(); diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 6b00e9ac400..7ea84b7902a 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -539,6 +539,7 @@ impl NakamotoBlockBuilder { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ) { TransactionResult::Success(..) => { debug!("Included {}", &tx.txid()); diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 44e1de41cb3..a56f07bb5d4 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1628,7 +1628,7 @@ fn test_nakamoto_block_static_verification() { let vrf_privkey = VRFPrivateKey::new(); let vrf_pubkey = VRFPublicKey::from_private(&vrf_privkey); let sortition_hash = SortitionHash([0x01; 32]); - let vrf_proof = VRF::prove(&vrf_privkey, sortition_hash.as_bytes()); + let vrf_proof = VRF::prove(&vrf_privkey, sortition_hash.as_bytes()).unwrap(); let burn_recipient = StacksAddress::burn_address(false).to_account_principal(); let alt_recipient = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key_2)) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 9abe9b9a4e8..d9bfafcb1ed 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -1037,6 +1037,7 @@ impl TestStacksNode { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ) { TransactionResult::Success(..) 
                 => {
                     debug!("Included {}", &tx.txid());
diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs
index 2d88cfe2344..4ac645e6bd7 100644
--- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs
+++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs
@@ -1178,6 +1178,7 @@ fn pox_2_delegate_extend_units() {
                     Value::UInt(0),
                 ],
                 |_, _| false,
+                None,
             )
         })
         .unwrap();
@@ -1779,7 +1780,15 @@ fn test_deploy_smart_contract(
     block.as_transaction(|tx| {
         let (ast, analysis) =
             tx.analyze_smart_contract(contract_id, version, content, ASTRules::PrecheckSize)?;
-        tx.initialize_smart_contract(contract_id, version, &ast, content, None, |_, _| false)?;
+        tx.initialize_smart_contract(
+            contract_id,
+            version,
+            &ast,
+            content,
+            None,
+            |_, _| false,
+            None,
+        )?;
         tx.save_analysis(contract_id, &analysis)?;
         return Ok(());
     })
diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs
index af1129b9f04..2e6b74596df 100644
--- a/stackslib/src/chainstate/stacks/db/blocks.rs
+++ b/stackslib/src/chainstate/stacks/db/blocks.rs
@@ -4010,7 +4010,7 @@ impl StacksChainState {
         debug!("Process microblock {}", &microblock.block_hash());
         for (tx_index, tx) in microblock.txs.iter().enumerate() {
             let (tx_fee, mut tx_receipt) =
-                StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules)
+                StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules, None)
                     .map_err(|e| (e, microblock.block_hash()))?;
             tx_receipt.microblock_header = Some(microblock.header.clone());
@@ -4176,6 +4176,7 @@ impl StacksChainState {
                 "stack-stx",
                 &args,
                 |_, _| false,
+                None,
             )
         });
         match result {
@@ -4384,6 +4385,7 @@ impl StacksChainState {
                     reward_addr_val,
                 ],
                 |_, _| false,
+                None,
             )
         });
         match result {
@@ -4490,6 +4492,7 @@ impl StacksChainState {
                     Value::UInt(reward_cycle.clone().into()),
                 ],
                 |_, _| false,
+                None,
             )
         });
         match result {
@@ -4567,7 +4570,7 @@ impl StacksChainState {
         let mut receipts = vec![];
         for tx in block_txs.iter() {
             let (tx_fee, mut tx_receipt) =
-                StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules)?;
+                StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules, None)?;
             fees = fees.checked_add(u128::from(tx_fee)).expect("Fee overflow");
             tx_receipt.tx_index = tx_index;
             burns = burns
diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs
index 78811ff0b80..bab5d57a722 100644
--- a/stackslib/src/chainstate/stacks/db/mod.rs
+++ b/stackslib/src/chainstate/stacks/db/mod.rs
@@ -1323,6 +1323,7 @@ impl StacksChainState {
                     &boot_code_smart_contract,
                     &boot_code_account,
                     ASTRules::PrecheckSize,
+                    None,
                 )
             })?;
             receipts.push(tx_receipt);
@@ -1646,6 +1647,7 @@ impl StacksChainState {
                 "set-burnchain-parameters",
                 &params,
                 |_, _| false,
+                None,
             )
             .expect("Failed to set burnchain parameters in PoX contract");
         });
diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs
index f92bde7d981..20eef9f7719 100644
--- a/stackslib/src/chainstate/stacks/db/transactions.rs
+++ b/stackslib/src/chainstate/stacks/db/transactions.rs
@@ -974,6 +974,7 @@ impl StacksChainState {
         tx: &StacksTransaction,
         origin_account: &StacksAccount,
         ast_rules: ASTRules,
+        max_execution_time: Option<std::time::Duration>,
     ) -> Result<StacksTransactionReceipt, Error> {
         match tx.payload {
             TransactionPayload::TokenTransfer(ref addr, ref amount, ref memo) => {
@@ -1044,6 +1045,7 @@ impl StacksChainState {
                         )
                         .expect("FATAL: error while evaluating post-conditions")
                     },
+
max_execution_time, ); let mut total_cost = clarity_tx.cost_so_far(); @@ -1280,6 +1282,7 @@ impl StacksChainState { ) .expect("FATAL: error while evaluating post-conditions") }, + max_execution_time, ); let mut total_cost = clarity_tx.cost_so_far(); @@ -1471,6 +1474,7 @@ impl StacksChainState { tx: &StacksTransaction, quiet: bool, ast_rules: ASTRules, + max_execution_time: Option, ) -> Result<(u64, StacksTransactionReceipt), Error> { debug!("Process transaction {} ({})", tx.txid(), tx.payload.name()); let epoch = clarity_block.get_epoch(); @@ -1509,6 +1513,7 @@ impl StacksChainState { tx, &origin_account, ast_rules, + max_execution_time, )?; // update the account nonces @@ -1537,6 +1542,7 @@ impl StacksChainState { tx, &origin_account, ast_rules, + None, )?; let new_payer_account = StacksChainState::get_payer_account(&mut transaction, tx); @@ -1700,6 +1706,7 @@ pub mod test { stx_balance: STXBalance::Unlocked { amount: 100 }, }, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -1764,6 +1771,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -1815,6 +1823,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2009,6 +2018,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ); if let Err(Error::InvalidStacksTransaction(msg, false)) = res { assert!(msg.contains(&err_frag), "{err_frag}"); @@ -2099,6 +2109,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2179,6 +2190,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2273,6 +2285,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ); if expected_behavior[i] { assert!(res.is_ok()); @@ -2366,6 +2379,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2470,6 +2484,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2558,6 +2573,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2671,6 +2687,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2683,6 +2700,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2805,6 +2823,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2820,6 +2839,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2895,6 +2915,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2946,6 +2967,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3010,6 +3032,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3115,6 +3138,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3153,6 +3177,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ); assert!(res.is_err()); @@ -3184,6 +3209,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3224,6 +3250,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ); assert!(res.is_ok()); @@ -3349,6 +3376,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3365,6 +3393,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3876,6 +3905,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3901,6 +3931,7 @@ pub mod test { tx_pass, false, 
ASTRules::PrecheckSize, + None, ) .unwrap(); expected_stackaroos_balance += 100; @@ -3931,6 +3962,7 @@ pub mod test { tx_pass, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_stackaroos_balance -= 100; @@ -3978,6 +4010,7 @@ pub mod test { tx_pass, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_nonce += 1; @@ -4008,6 +4041,7 @@ pub mod test { tx_fail, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_nonce += 1; @@ -4051,6 +4085,7 @@ pub mod test { tx_fail, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_recv_nonce += 1; @@ -4099,6 +4134,7 @@ pub mod test { tx_fail, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_nonce += 1; @@ -4594,6 +4630,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -4618,6 +4655,7 @@ pub mod test { tx_pass, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_stackaroos_balance += 100; @@ -4665,6 +4703,7 @@ pub mod test { tx_pass, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_stackaroos_balance -= 100; @@ -4731,6 +4770,7 @@ pub mod test { tx_fail, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_nonce += 1; @@ -4789,6 +4829,7 @@ pub mod test { tx_fail, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_recv_nonce += 1; @@ -4955,6 +4996,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -4963,6 +5005,7 @@ pub mod test { &contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8059,6 +8102,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); let err = StacksChainState::process_transaction( @@ -8066,6 +8110,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap_err(); @@ -8090,6 +8135,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); let (fee, _) = StacksChainState::process_transaction( @@ -8097,6 +8143,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8244,6 +8291,7 @@ pub mod test { &signed_tx_poison_microblock, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8364,6 +8412,7 @@ pub mod test { &signed_tx_poison_microblock, false, ASTRules::PrecheckSize, + None, ) .unwrap_err(); let Error::ClarityError(clarity_error::BadTransaction(msg)) = &err else { @@ -8482,6 +8531,7 @@ pub mod test { &signed_tx_poison_microblock_1, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8496,6 +8546,7 @@ pub mod test { &signed_tx_poison_microblock_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8761,6 +8812,7 @@ pub mod test { &smart_contract_v2, false, ASTRules::PrecheckSize, + None, ) { assert!(msg.find("not in Stacks epoch 2.1 or later").is_some()); } else { @@ -9053,6 +9105,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9062,6 +9115,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); @@ -9081,6 +9135,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9090,6 +9145,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); @@ -9109,6 +9165,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9118,6 +9175,7 @@ pub mod test { &signed_contract_call_tx, false, 
ASTRules::PrecheckSize, + None, ) .unwrap_err(); conn.commit_block(); @@ -9220,6 +9278,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9229,6 +9288,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); @@ -9248,6 +9308,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9257,6 +9318,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); @@ -9276,6 +9338,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9285,6 +9348,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap_err(); conn.commit_block(); @@ -9310,7 +9374,7 @@ pub mod test { return Err(Error::InvalidStacksTransaction(msg, false)); } - StacksChainState::process_transaction(clarity_block, tx, quiet, ast_rules) + StacksChainState::process_transaction(clarity_block, tx, quiet, ast_rules, None) } #[test] @@ -9875,6 +9939,7 @@ pub mod test { &signed_runtime_checkerror_tx_clar1, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 819de80a333..ac6dfe448c6 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -219,6 +219,7 @@ pub struct BlockBuilderSettings { pub miner_status: Arc>, /// Should the builder attempt to confirm any parent microblocks pub confirm_microblocks: bool, + pub max_execution_time: Option, } impl BlockBuilderSettings { @@ -230,6 +231,7 @@ impl BlockBuilderSettings { mempool_settings: MemPoolWalkSettings::default(), miner_status: Arc::new(Mutex::new(MinerStatus::make_ready(0))), confirm_microblocks: true, + max_execution_time: None, } } @@ -241,6 +243,7 @@ impl BlockBuilderSettings { mempool_settings: MemPoolWalkSettings::zero(), miner_status: Arc::new(Mutex::new(MinerStatus::make_ready(0))), confirm_microblocks: true, + max_execution_time: None, } } } @@ -418,10 +421,11 @@ pub enum TransactionEvent { impl TransactionResult { /// Logs a queryable message for the case where `txid` has succeeded. pub fn log_transaction_success(tx: &StacksTransaction) { - info!("Tx successfully processed."; + info!("Tx successfully processed"; "event_name" => %"transaction_result", "tx_id" => %tx.txid(), "event_type" => %"success", + "fee" => tx.get_tx_fee() ); } @@ -445,6 +449,7 @@ impl TransactionResult { "tx_id" => %tx.txid(), "event_type" => "skip", "reason" => %err, + "fee" => tx.get_tx_fee() ); } @@ -463,13 +468,12 @@ impl TransactionResult { /// This method logs "transaction success" as a side effect. pub fn success( transaction: &StacksTransaction, - fee: u64, receipt: StacksTransactionReceipt, ) -> TransactionResult { Self::log_transaction_success(transaction); Self::Success(TransactionSuccess { tx: transaction.clone(), - fee, + fee: transaction.get_tx_fee(), receipt, soft_limit_reached: false, }) @@ -479,14 +483,13 @@ impl TransactionResult { /// This method logs "transaction success" as a side effect. 
pub fn success_with_soft_limit( transaction: &StacksTransaction, - fee: u64, receipt: StacksTransactionReceipt, soft_limit_reached: bool, ) -> TransactionResult { Self::log_transaction_success(transaction); Self::Success(TransactionSuccess { tx: transaction.clone(), - fee, + fee: transaction.get_tx_fee(), receipt, soft_limit_reached, }) @@ -679,6 +682,7 @@ pub trait BlockBuilder { tx_len: u64, limit_behavior: &BlockLimitFunction, ast_rules: ASTRules, + max_execution_time: Option, ) -> TransactionResult; /// Append a transaction if doing so won't exceed the epoch data size. @@ -688,6 +692,7 @@ pub trait BlockBuilder { clarity_tx: &mut ClarityTx, tx: &StacksTransaction, ast_rules: ASTRules, + max_execution_time: Option, ) -> Result { let tx_len = tx.tx_len(); match self.try_mine_tx_with_len( @@ -696,6 +701,7 @@ pub trait BlockBuilder { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ast_rules, + max_execution_time, ) { TransactionResult::Success(s) => Ok(TransactionResult::Success(s)), TransactionResult::Skipped(TransactionSkipped { error, .. }) @@ -1053,8 +1059,8 @@ impl<'a> StacksMicroblockBuilder<'a> { } let quiet = !cfg!(test); - match StacksChainState::process_transaction(clarity_tx, &tx, quiet, ast_rules) { - Ok((fee, receipt)) => Ok(TransactionResult::success(&tx, fee, receipt)), + match StacksChainState::process_transaction(clarity_tx, &tx, quiet, ast_rules, None) { + Ok((_fee, receipt)) => Ok(TransactionResult::success(&tx, receipt)), Err(e) => { let (is_problematic, e) = TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); @@ -1246,7 +1252,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let deadline = get_epoch_time_ms() + u128::from(self.settings.max_miner_time_ms); let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; - mem_pool.reset_nonce_cache()?; + mem_pool.reset_mempool_caches()?; let stacks_epoch_id = clarity_tx.get_epoch(); let block_limit = clarity_tx .block_limit() @@ -1686,7 +1692,13 @@ impl StacksBlockBuilder { let quiet = !cfg!(test); if !self.anchored_done { // save - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { + match StacksChainState::process_transaction( + clarity_tx, + tx, + quiet, + ASTRules::Typical, + None, + ) { Ok((fee, receipt)) => { self.total_anchored_fees += fee; } @@ -1697,7 +1709,13 @@ impl StacksBlockBuilder { self.txs.push(tx.clone()); } else { - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { + match StacksChainState::process_transaction( + clarity_tx, + tx, + quiet, + ASTRules::Typical, + None, + ) { Ok((fee, receipt)) => { self.total_streamed_fees += fee; } @@ -2097,7 +2115,7 @@ impl StacksBlockBuilder { let ast_rules = miner_epoch_info.ast_rules; let (mut epoch_tx, _) = builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; for tx in txs.into_iter() { - match builder.try_mine_tx(&mut epoch_tx, &tx, ast_rules.clone()) { + match builder.try_mine_tx(&mut epoch_tx, &tx, ast_rules.clone(), None) { Ok(_) => { debug!("Included {}", &tx.txid()); } @@ -2267,7 +2285,12 @@ impl StacksBlockBuilder { for initial_tx in initial_txs.iter() { tx_events.push( builder - .try_mine_tx(epoch_tx, initial_tx, ast_rules.clone())? + .try_mine_tx( + epoch_tx, + initial_tx, + ast_rules.clone(), + settings.max_execution_time, + )? 
.convert_to_event(), ); } @@ -2287,13 +2310,10 @@ impl StacksBlockBuilder { } } - mempool.reset_nonce_cache()?; mempool.estimate_tx_rates(100, &block_limit, &stacks_epoch_id)?; let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; let mut considered = HashSet::new(); // txids of all transactions we looked at - let mut mined_origin_nonces: HashMap = HashMap::new(); // map addrs of mined transaction origins to the nonces we used - let mut mined_sponsor_nonces: HashMap = HashMap::new(); // map addrs of mined transaction sponsors to the nonces we used let mut invalidated_txs = vec![]; let mut to_drop_and_blacklist = vec![]; @@ -2308,6 +2328,17 @@ impl StacksBlockBuilder { let mut loop_result = Ok(()); while block_limit_hit != BlockLimitFunction::LIMIT_REACHED { let mut num_considered = 0; + + // Check if we've been preempted before we attempt mining. + // This is important because otherwise, we will add unnecessary + // contention on the mempool DB. + blocked = + (*settings.miner_status.lock().expect("FATAL: mutex poisoned")).is_blocked(); + if blocked { + info!("Miner stopping due to preemption"); + break; + } + let intermediate_result = mempool.iterate_candidates( epoch_tx, &mut tx_events, @@ -2317,7 +2348,7 @@ impl StacksBlockBuilder { blocked = (*settings.miner_status.lock().expect("FATAL: mutex poisoned")) .is_blocked(); if blocked { - debug!("Miner stopping due to preemption"); + info!("Miner stopping due to preemption"); return Ok(None); } @@ -2325,16 +2356,20 @@ impl StacksBlockBuilder { let update_estimator = to_consider.update_estimate; if block_limit_hit == BlockLimitFunction::LIMIT_REACHED { + info!("Miner stopping due to limit reached"); return Ok(None); } let time_now = get_epoch_time_ms(); if time_now >= deadline { - debug!("Miner mining time exceeded ({} ms)", max_miner_time_ms); + info!( + "Miner stopping due to mining time exceeded ({} ms)", + max_miner_time_ms + ); return Ok(None); } if let Some(time_estimate) = txinfo.metadata.time_estimate_ms { if time_now.saturating_add(time_estimate.into()) > deadline { - debug!("Mining tx would cause us to exceed our deadline, skipping"; + info!("Mining tx would cause us to exceed our deadline, skipping"; "txid" => %txinfo.tx.txid(), "deadline" => deadline, "now" => time_now, @@ -2360,40 +2395,6 @@ impl StacksBlockBuilder { )); } - if let Some(nonce) = mined_origin_nonces.get(&txinfo.tx.origin_address()) { - if *nonce >= txinfo.tx.get_origin_nonce() { - return Ok(Some( - TransactionResult::skipped( - &txinfo.tx, - format!( - "Bad origin nonce, tx nonce {} versus {}.", - txinfo.tx.get_origin_nonce(), - *nonce - ), - ) - .convert_to_event(), - )); - } - } - if let Some(sponsor_addr) = txinfo.tx.sponsor_address() { - if let Some(nonce) = mined_sponsor_nonces.get(&sponsor_addr) { - if let Some(sponsor_nonce) = txinfo.tx.get_sponsor_nonce() { - if *nonce >= sponsor_nonce { - return Ok(Some( - TransactionResult::skipped( - &txinfo.tx, - format!( - "Bad sponsor nonce, tx nonce {} versus {}.", - sponsor_nonce, *nonce - ), - ) - .convert_to_event(), - )); - } - } - } - } - considered.insert(txinfo.tx.txid()); num_considered += 1; @@ -2407,6 +2408,7 @@ impl StacksBlockBuilder { txinfo.metadata.len, &block_limit_hit, ast_rules, + settings.max_execution_time, ); let result_event = tx_result.convert_to_event(); @@ -2445,15 +2447,7 @@ impl StacksBlockBuilder { "error" => ?e); } } - mined_origin_nonces.insert( - txinfo.tx.origin_address(), - txinfo.tx.get_origin_nonce(), - ); - if let (Some(sponsor_addr), Some(sponsor_nonce)) = - 
(txinfo.tx.sponsor_address(), txinfo.tx.get_sponsor_nonce()) - { - mined_sponsor_nonces.insert(sponsor_addr, sponsor_nonce); - } + if soft_limit_reached { // done mining -- our soft limit execution budget is exceeded. // Make the block from the transactions we did manage to get @@ -2484,9 +2478,7 @@ impl StacksBlockBuilder { } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT { - debug!( - "Stop mining anchored block due to limit exceeded" - ); + info!("Miner stopping due to limit reached"); block_limit_hit = BlockLimitFunction::LIMIT_REACHED; return Ok(None); } @@ -2652,6 +2644,7 @@ impl StacksBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); + mempool.reset_mempool_caches()?; let (blocked, tx_events) = match Self::select_and_apply_transactions( &mut epoch_tx, &mut builder, @@ -2732,6 +2725,7 @@ impl BlockBuilder for StacksBlockBuilder { tx_len: u64, limit_behavior: &BlockLimitFunction, ast_rules: ASTRules, + _max_execution_time: Option, ) -> TransactionResult { if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { return TransactionResult::skipped_due_to_error(tx, Error::BlockTooBigError); @@ -2797,7 +2791,7 @@ impl BlockBuilder for StacksBlockBuilder { return TransactionResult::problematic(tx, Error::NetError(e)); } let (fee, receipt) = match StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, + clarity_tx, tx, quiet, ast_rules, None, ) { Ok((fee, receipt)) => (fee, receipt), Err(e) => { @@ -2858,7 +2852,7 @@ impl BlockBuilder for StacksBlockBuilder { self.txs.push(tx.clone()); self.total_anchored_fees += fee; - TransactionResult::success(tx, fee, receipt) + TransactionResult::success(tx, receipt) } else { // building up the microblocks if tx.anchor_mode != TransactionAnchorMode::OffChainOnly @@ -2887,7 +2881,7 @@ impl BlockBuilder for StacksBlockBuilder { return TransactionResult::problematic(tx, Error::NetError(e)); } let (fee, receipt) = match StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, + clarity_tx, tx, quiet, ast_rules, None, ) { Ok((fee, receipt)) => (fee, receipt), Err(e) => { @@ -2948,7 +2942,7 @@ impl BlockBuilder for StacksBlockBuilder { self.micro_txs.push(tx.clone()); self.total_streamed_fees += fee; - TransactionResult::success(tx, fee, receipt) + TransactionResult::success(tx, receipt) }; self.bytes_so_far += tx_len; diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 0e70321784c..aabad63b33d 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -30,6 +30,7 @@ use clarity::vm::costs::LimitedCostTracker; use clarity::vm::database::ClarityDatabase; use clarity::vm::test_util::TEST_BURN_STATE_DB; use clarity::vm::types::*; +use mempool::MemPoolWalkStrategy; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; use rusqlite::params; @@ -4852,7 +4853,7 @@ fn mempool_walk_test_users_10_rounds_3_cache_size_2000_null_prob_100() { fn paramaterized_mempool_walk_test( num_users: usize, num_rounds: usize, - nonce_and_candidate_cache_size: u64, + nonce_and_candidate_cache_size: usize, consider_no_estimate_tx_prob: u8, timeout_ms: u128, ) { @@ -4999,7 +5000,6 @@ fn paramaterized_mempool_walk_test( // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), 
vec![], @@ -5027,3 +5027,246 @@ fn paramaterized_mempool_walk_test( }, ); } + +#[test] +/// Test that the mempool walk query ignores old nonces and prefers next possible nonces before higher global fees. +fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { + let key_address_pairs: Vec<_> = (0..7) + .map(|_user_index| { + let privk = StacksPrivateKey::random(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk)], + ) + .unwrap(); + (privk, addr) + }) + .collect(); + let accounts: Vec = key_address_pairs + .iter() + .map(|(_, b)| b.to_string()) + .collect(); + let address_0 = accounts[0].to_string(); + let address_1 = accounts[1].to_string(); + let address_2 = accounts[2].to_string(); + let address_3 = accounts[3].to_string(); + let address_4 = accounts[4].to_string(); + let address_5 = accounts[5].to_string(); + let address_6 = accounts[6].to_string(); + + let test_name = function_name!(); + let mut peer_config = TestPeerConfig::new(&test_name, 0, 0); + peer_config.initial_balances = vec![]; + for (privk, addr) in &key_address_pairs { + peer_config + .initial_balances + .push((addr.to_account_principal(), 1000000000)); + } + + let recipient = + StacksAddress::from_string("ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV").unwrap(); + + let mut chainstate = + instantiate_chainstate_with_balances(false, 0x80000000, &test_name, vec![]); + let chainstate_path = chainstate_path(&test_name); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + let b_1 = make_block( + &mut chainstate, + ConsensusHash([0x1; 20]), + &( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ), + 1, + 1, + ); + let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); + + let mut tx_events = Vec::new(); + + // Simulate next possible nonces for **some** addresses. Leave some blank so we can test the case where the nonce cannot be + // found on the db table and has to be pulled from the MARF. + let mempool_tx = mempool.tx_begin().unwrap(); + mempool_tx + .execute( + "INSERT INTO nonces (address, nonce) VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?)", + params![address_0, 2, address_1, 1, address_2, 6, address_4, 1, address_5, 0], + ) + .unwrap(); + mempool_tx.commit().unwrap(); + + // Test transactions with a wide variety of origin/sponsor configurations and fee rate values. Some transactions do not have a + // sponsor, some others do, and some others are sponsored by other sponsors. All will be in flight at the same time. 
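// Illustrative sketch (not part of this diff): the eligibility rule the test
// vectors below encode (see the tuple-shape comment that follows). Under the
// NextNonceWithHighestFeeRate strategy a transaction is mineable only when
// both its origin and sponsor nonces equal the next expected nonce for the
// respective addresses; mineable candidates are then taken highest fee first.
fn is_mineable(
    origin_nonce: u64,
    expected_origin: u64,
    sponsor_nonce: u64,
    expected_sponsor: u64,
) -> bool {
    origin_nonce == expected_origin && sponsor_nonce == expected_sponsor
}
// e.g. with address_2's expected nonce at 6 and address_4's at 1, the
// (2, 6, 4, 0, 900.0) entry is skipped (stale sponsor nonce) while
// (2, 6, 4, 1, 1000.0) is the first transaction visited.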
+ // + // tuple shape: (origin_address_index, origin_nonce, sponsor_address_index, sponsor_nonce, fee_rate) + let test_vectors = vec![ + (0, 0, 0, 0, 100.0), // Old origin nonce - ignored + (0, 1, 0, 1, 200.0), // Old origin nonce - ignored + (0, 2, 0, 2, 300.0), + (0, 3, 0, 3, 400.0), + (0, 4, 3, 0, 500.0), // Nonce 0 for address 3 is not in the table but will be valid on MARF + (1, 0, 1, 0, 400.0), // Old origin nonce - ignored + (1, 1, 3, 1, 600.0), + (1, 2, 3, 2, 700.0), + (1, 3, 3, 3, 800.0), + (1, 4, 1, 4, 1200.0), + (2, 3, 2, 3, 9000.0), // Old origin nonce - ignored + (2, 4, 2, 4, 9000.0), // Old origin nonce - ignored + (2, 5, 2, 5, 9000.0), // Old origin nonce - ignored + (2, 6, 4, 0, 900.0), // Old sponsor nonce - ignored + (2, 6, 4, 1, 1000.0), + (2, 7, 4, 2, 800.0), + (2, 8, 2, 8, 1000.0), + (2, 9, 3, 5, 1000.0), + (2, 10, 3, 6, 1500.0), + (3, 4, 3, 4, 100.0), + (4, 3, 5, 2, 550.0), + (5, 0, 5, 0, 500.0), + (5, 1, 5, 1, 500.0), + (5, 3, 4, 4, 2000.0), + (5, 4, 4, 5, 2000.0), + (6, 2, 6, 2, 1000.0), // Address has nonce 0 in MARF - ignored + ]; + for (origin_index, origin_nonce, sponsor_index, sponsor_nonce, fee_rate) in + test_vectors.into_iter() + { + // Create tx, either standard or sponsored + let mut tx = if origin_index != sponsor_index { + let payload = TransactionPayload::TokenTransfer( + recipient.to_account_principal(), + 1, + TokenTransferMemo([0; 34]), + ); + sign_sponsored_singlesig_tx( + payload.into(), + &key_address_pairs[origin_index].0, + &key_address_pairs[sponsor_index].0, + origin_nonce, + sponsor_nonce, + 200, + ) + } else { + make_user_stacks_transfer( + &key_address_pairs[origin_index].0, + origin_nonce, + 200, + &recipient.to_account_principal(), + 1, + ) + }; + + let mut mempool_tx = mempool.tx_begin().unwrap(); + + let origin_address = tx.origin_address(); + let sponsor_address = tx.sponsor_address().unwrap_or(origin_address); + tx.set_tx_fee(fee_rate as u64); + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let tx_fee = tx.get_tx_fee(); + let height = 100; + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &b_1.0, + &b_1.1, + true, + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .unwrap(); + mempool_tx + .execute( + "UPDATE mempool SET fee_rate = ? WHERE txid = ?", + params![Some(fee_rate), &txid], + ) + .unwrap(); + + mempool_tx.commit().unwrap(); + } + + // Visit transactions using the `NextNonceWithHighestFeeRate` strategy. Keep a record of the order of visits so we can compare + // at the end. 
+ let mut mempool_settings = MemPoolWalkSettings::default(); + mempool_settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + let mut considered_txs = vec![]; + let deadline = get_epoch_time_ms() + 30000; + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), + |clarity_conn| { + loop { + if mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + &mut tx_events, + mempool_settings.clone(), + |_, available_tx, _| { + considered_txs.push(( + available_tx.tx.metadata.origin_address.to_string(), + available_tx.tx.metadata.origin_nonce, + available_tx.tx.metadata.sponsor_address.to_string(), + available_tx.tx.metadata.sponsor_nonce, + available_tx.tx.metadata.tx_fee, + )); + Ok(Some( + // Generate any success result + TransactionResult::success( + &available_tx.tx.tx, + StacksTransactionReceipt::from_stx_transfer( + available_tx.tx.tx.clone(), + vec![], + Value::okay(Value::Bool(true)).unwrap(), + ExecutionCost::ZERO, + ), + ) + .convert_to_event(), + )) + }, + ) + .unwrap() + .0 + == 0 + { + break; + } + assert!(get_epoch_time_ms() < deadline, "test timed out"); + } + + // Expected transaction consideration order, sorted by mineable first (next origin+sponsor nonces, highest fee). + // Ignores old and very future nonces. + let expected_tx_order = vec![ + (address_2.clone(), 6, address_4.clone(), 1, 1000), // Round 1 + (address_5.clone(), 0, address_5.clone(), 0, 500), + (address_0.clone(), 2, address_0.clone(), 2, 300), + (address_2.clone(), 7, address_4.clone(), 2, 800), // Round 2 + (address_5.clone(), 1, address_5.clone(), 1, 500), + (address_0.clone(), 3, address_0.clone(), 3, 400), + (address_2.clone(), 8, address_2.clone(), 8, 1000), // Round 3 + (address_4.clone(), 3, address_5.clone(), 2, 550), + (address_0.clone(), 4, address_3.clone(), 0, 500), + (address_5.clone(), 3, address_4.clone(), 4, 2000), // Round 4 + (address_1.clone(), 1, address_3.clone(), 1, 600), + (address_5.clone(), 4, address_4.clone(), 5, 2000), // Round 5 + (address_1.clone(), 2, address_3.clone(), 2, 700), + (address_1.clone(), 3, address_3.clone(), 3, 800), // Round 6 + (address_1.clone(), 4, address_1.clone(), 4, 1200), // Round 7 + (address_3.clone(), 4, address_3.clone(), 4, 100), + (address_2.clone(), 9, address_3.clone(), 5, 1000), // Round 8 + (address_2.clone(), 10, address_3.clone(), 6, 1500), // Round 9 + ]; + assert_eq!( + considered_txs, expected_tx_order, + "Mempool should visit transactions in the correct order while ignoring past nonces", + ); + }, + ); +} diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index 0ad4549ecd3..e91b00bad1a 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -2734,7 +2734,12 @@ pub fn mine_empty_anchored_block( let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2769,7 +2774,12 @@ pub fn mine_empty_anchored_block_with_burn_height_pubkh( let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + 
ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2804,7 +2814,12 @@ pub fn mine_empty_anchored_block_with_stacks_height_pubkh( let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2835,7 +2850,12 @@ pub fn mine_invalid_token_transfers_block( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let recipient = @@ -2909,7 +2929,12 @@ pub fn mine_smart_contract_contract_call_block( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); // make a smart contract @@ -2919,7 +2944,12 @@ pub fn mine_smart_contract_contract_call_block( builder.header.total_work.work as usize, ); builder - .try_mine_tx(clarity_tx, &tx_contract_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_contract_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); // make a contract call @@ -2931,7 +2961,12 @@ pub fn mine_smart_contract_contract_call_block( 2, ); builder - .try_mine_tx(clarity_tx, &tx_contract_call_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_contract_call_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2986,7 +3021,12 @@ pub fn mine_smart_contract_block_contract_call_microblock( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); // make a smart contract @@ -2996,7 +3036,12 @@ pub fn mine_smart_contract_block_contract_call_microblock( builder.header.total_work.work as usize, ); builder - .try_mine_tx(clarity_tx, &tx_contract_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_contract_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -3073,7 +3118,12 @@ pub fn mine_smart_contract_block_contract_call_microblock_exception( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); // make a smart contract @@ -3083,7 +3133,12 @@ pub fn mine_smart_contract_block_contract_call_microblock_exception( builder.header.total_work.work as usize, ); builder - .try_mine_tx(clarity_tx, &tx_contract_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_contract_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index f550f8b032e..81a143ac116 
100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -1363,6 +1363,37 @@ pub fn sign_standard_singlesig_tx( tx_signer.get_tx().unwrap() } +pub fn sign_sponsored_singlesig_tx( + payload: TransactionPayload, + origin: &StacksPrivateKey, + sponsor: &StacksPrivateKey, + origin_nonce: u64, + sponsor_nonce: u64, + tx_fee: u64, +) -> StacksTransaction { + let mut origin_spending_condition = + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(origin)) + .expect("Failed to create p2pkh spending condition from public key."); + origin_spending_condition.set_nonce(origin_nonce); + origin_spending_condition.set_tx_fee(tx_fee); + let mut sponsored_spending_condition = + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sponsor)) + .expect("Failed to create p2pkh spending condition from public key."); + sponsored_spending_condition.set_nonce(sponsor_nonce); + sponsored_spending_condition.set_tx_fee(tx_fee); + let auth = TransactionAuth::Sponsored(origin_spending_condition, sponsored_spending_condition); + let mut unsigned_tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + + unsigned_tx.chain_id = 0x80000000; + unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; + + let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); + tx_signer.sign_origin(origin).unwrap(); + tx_signer.sign_sponsor(sponsor).unwrap(); + + tx_signer.get_tx().unwrap() +} + pub fn get_stacks_account(peer: &mut TestPeer, addr: &PrincipalData) -> StacksAccount { let account = peer .with_db_state(|ref mut sortdb, ref mut chainstate, _, _| { diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 4d413200cc1..f73593f4dd2 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -387,6 +387,7 @@ impl ClarityInstance { BOOT_CODE_COSTS, None, |_, _| false, + None, ) .unwrap(); }); @@ -408,6 +409,7 @@ impl ClarityInstance { &*BOOT_CODE_COST_VOTING, None, |_, _| false, + None, ) .unwrap(); @@ -433,6 +435,7 @@ impl ClarityInstance { &*BOOT_CODE_POX_TESTNET, None, |_, _| false, + None, ) .unwrap(); }); @@ -484,6 +487,7 @@ impl ClarityInstance { BOOT_CODE_COSTS_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -505,6 +509,7 @@ impl ClarityInstance { BOOT_CODE_COSTS_3, None, |_, _| false, + None, ) .unwrap(); }); @@ -526,6 +531,7 @@ impl ClarityInstance { &*POX_2_TESTNET_CODE, None, |_, _| false, + None, ) .unwrap(); }); @@ -878,6 +884,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &costs_2_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process PoX 2 contract initialization"); @@ -991,6 +998,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &pox_2_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process PoX 2 contract initialization"); @@ -1012,6 +1020,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { "set-burnchain-parameters", ¶ms, |_, _| false, + None, ) .expect("Failed to set burnchain parameters in PoX-2 contract"); @@ -1062,6 +1071,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &costs_3_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process costs-3 contract initialization"); @@ -1232,6 +1242,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &pox_3_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process PoX 3 contract initialization"); 
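The `sign_sponsored_singlesig_tx` helper added in `tests/mod.rs` above builds a two-party transaction: separate single-sig p2pkh spending conditions for origin and sponsor, each with its own nonce, signed first by the origin and then by the sponsor. A minimal usage sketch, illustrative only — `origin_key`, `sponsor_key`, and `recipient` are assumed bindings, not names from the patch:

```rust
// Sketch under assumed bindings: `origin_key`/`sponsor_key` are existing
// `StacksPrivateKey`s and `recipient` is an existing `StacksAddress`.
let payload = TransactionPayload::TokenTransfer(
    recipient.to_account_principal(),
    1, // amount, in uSTX
    TokenTransferMemo([0; 34]),
);
// Origin nonce 4, sponsor nonce 0, and a 200 uSTX fee set on both spending
// conditions -- mirroring the vectors used in the mempool walk test above.
let tx = sign_sponsored_singlesig_tx(payload.into(), &origin_key, &sponsor_key, 4, 0, 200);
assert!(tx.auth.is_sponsored());
assert_eq!(tx.get_origin_nonce(), 4);
assert_eq!(tx.get_sponsor_nonce(), Some(0));
```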
@@ -1253,6 +1264,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { "set-burnchain-parameters", ¶ms, |_, _| false, + None, ) .expect("Failed to set burnchain parameters in PoX-3 contract"); @@ -1349,6 +1361,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &pox_4_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process PoX 4 contract initialization"); @@ -1369,6 +1382,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { "set-burnchain-parameters", ¶ms, |_, _| false, + None, ) .expect("Failed to set burnchain parameters in PoX-3 contract"); @@ -1407,6 +1421,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &signers_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process .signers contract initialization"); receipt @@ -1453,6 +1468,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &signers_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process .signers DB contract initialization"); receipt @@ -1493,6 +1509,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &signers_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process .signers-voting contract initialization"); receipt @@ -2040,6 +2057,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2093,6 +2111,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); tx.save_analysis(&contract_identifier, &ct_analysis) @@ -2121,6 +2140,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); tx.save_analysis(&contract_identifier, &ct_analysis) @@ -2152,7 +2172,8 @@ mod tests { &ct_ast, contract, None, - |_, _| false + |_, _| false, + None ) .unwrap_err() ) @@ -2205,6 +2226,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2218,7 +2240,8 @@ mod tests { &contract_identifier, "foo", &[Value::Int(1)], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -2265,6 +2288,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2357,6 +2381,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2488,6 +2513,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2501,7 +2527,8 @@ mod tests { &contract_identifier, "get-bar", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -2515,7 +2542,8 @@ mod tests { &contract_identifier, "set-bar", &[Value::Int(1), Value::Int(1)], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -2531,6 +2559,7 @@ mod tests { "set-bar", &[Value::Int(10), Value::Int(1)], |_, _| true, + None, ) }) .unwrap_err(); @@ -2550,7 +2579,8 @@ mod tests { &contract_identifier, "get-bar", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -2565,7 +2595,8 @@ mod tests { &contract_identifier, "set-bar", &[Value::Int(10), Value::Int(0)], - |_, _| true + |_, _| true, + None )) .unwrap_err() ) @@ -2579,7 +2610,8 @@ mod tests { &contract_identifier, "get-bar", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -2690,6 +2722,7 @@ mod tests { &tx1, &account, ASTRules::PrecheckSize, + None, ) .unwrap(); assert!(receipt.post_condition_aborted); @@ -2700,6 +2733,7 @@ mod tests { &tx2, &account, ASTRules::PrecheckSize, + None, ) .unwrap(); }); @@ -2710,6 +2744,7 @@ mod tests { &tx3, 
&account, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2870,6 +2905,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2893,7 +2929,8 @@ mod tests { &contract_identifier, "do-expand", &[], - |_, _| false + |_, _| false, + None )) .unwrap_err() { diff --git a/stackslib/src/clarity_vm/tests/analysis_costs.rs b/stackslib/src/clarity_vm/tests/analysis_costs.rs index b60d20e34a6..0114cc34184 100644 --- a/stackslib/src/clarity_vm/tests/analysis_costs.rs +++ b/stackslib/src/clarity_vm/tests/analysis_costs.rs @@ -132,6 +132,7 @@ fn setup_tracked_cost_test( contract_trait, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&trait_contract_id, &ct_analysis) @@ -165,6 +166,7 @@ fn setup_tracked_cost_test( contract_other, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&other_contract_id, &ct_analysis) @@ -240,6 +242,7 @@ fn test_tracked_costs( &contract_self, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&self_contract_id, &ct_analysis).unwrap(); diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 75f14fcc490..16b83146c78 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -132,6 +132,7 @@ fn test_get_burn_block_info_eval() { contract, None, |_, _| false, + None, ) .unwrap(); }); @@ -247,7 +248,7 @@ fn test_get_block_info_eval_v210() { .analyze_smart_contract(&contract_identifier, clarity_version, contract, ASTRules::PrecheckSize) .unwrap(); clarity_db - .initialize_smart_contract(&contract_identifier, clarity_version, &ast, contract, None, |_, _| false) + .initialize_smart_contract(&contract_identifier, clarity_version, &ast, contract, None, |_, _| false, None) .unwrap(); }); let mut tx = conn.start_transaction_processing(); @@ -326,7 +327,15 @@ fn publish_contract( bc.as_transaction(|tx| { let (ast, analysis) = tx.analyze_smart_contract(contract_id, version, contract, ASTRules::PrecheckSize)?; - tx.initialize_smart_contract(contract_id, version, &ast, contract, None, |_, _| false)?; + tx.initialize_smart_contract( + contract_id, + version, + &ast, + contract, + None, + |_, _| false, + None, + )?; tx.save_analysis(contract_id, &analysis)?; Ok(()) }) @@ -448,6 +457,7 @@ fn trait_invocation_cross_epoch() { "invocation-1", &[], |_, _| false, + None, ) .unwrap(); }); @@ -465,7 +475,7 @@ fn trait_invocation_cross_epoch() { &invoke_contract_id, "invocation-1", &[], - |_, _| false, + |_, _| false, None ) .unwrap_err(); @@ -488,7 +498,7 @@ fn trait_invocation_cross_epoch() { &invoke_contract_id, "invocation-2", &[Value::Principal(impl_contract_id.clone().into())], - |_, _| false, + |_, _| false, None ) .unwrap_err(); @@ -513,6 +523,7 @@ fn trait_invocation_cross_epoch() { "invocation-1", &[], |_, _| false, + None, ) .unwrap(); }); @@ -530,6 +541,7 @@ fn trait_invocation_cross_epoch() { "invocation-2", &[Value::Principal(impl_contract_id.clone().into())], |_, _| false, + None, ) .unwrap(); }); @@ -613,6 +625,7 @@ fn trait_with_trait_invocation_cross_epoch() { math_trait, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -637,6 +650,7 @@ fn trait_with_trait_invocation_cross_epoch() { compute_trait, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -661,6 +675,7 @@ fn trait_with_trait_invocation_cross_epoch() { impl_compute, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -685,6 +700,7 @@ fn trait_with_trait_invocation_cross_epoch() { impl_math, None, |_, _| false, + 
None, ) .unwrap(); clarity_db @@ -709,6 +725,7 @@ fn trait_with_trait_invocation_cross_epoch() { use_compute, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -740,6 +757,7 @@ fn trait_with_trait_invocation_cross_epoch() { use_compute, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -764,6 +782,7 @@ fn trait_with_trait_invocation_cross_epoch() { use_compute, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -784,6 +803,7 @@ fn trait_with_trait_invocation_cross_epoch() { "do-it-static", &[], |_, _| false, + None, ) .unwrap(); }); @@ -804,6 +824,7 @@ fn trait_with_trait_invocation_cross_epoch() { Value::UInt(1), ], |_, _| false, + None, ) .unwrap(); }); @@ -821,6 +842,7 @@ fn trait_with_trait_invocation_cross_epoch() { "do-it-static", &[], |_, _| false, + None, ) .unwrap(); }); @@ -841,6 +863,7 @@ fn trait_with_trait_invocation_cross_epoch() { Value::UInt(1), ], |_, _| false, + None, ) .unwrap(); }); @@ -858,6 +881,7 @@ fn trait_with_trait_invocation_cross_epoch() { "do-it-static", &[], |_, _| false, + None, ) .unwrap(); }); @@ -878,6 +902,7 @@ fn trait_with_trait_invocation_cross_epoch() { Value::UInt(1), ], |_, _| false, + None, ) .unwrap(); }); @@ -946,6 +971,7 @@ fn test_block_heights() { contract_clarity1, None, |_, _| false, + None ).unwrap(); // analyze the contracts as Clarity 2 @@ -1005,6 +1031,7 @@ fn test_block_heights() { contract_clarity3, None, |_, _| false, + None ).unwrap(); }); @@ -1224,6 +1251,7 @@ fn test_block_heights_across_versions() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1254,6 +1282,7 @@ fn test_block_heights_across_versions() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1286,6 +1315,7 @@ fn test_block_heights_across_versions() { &contract_e3c3, None, |_, _| false, + None, ) .unwrap(); }); @@ -1353,6 +1383,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1380,6 +1411,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1412,6 +1444,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { &contract_e3c3, None, |_, _| false, + None, ) .unwrap(); }); @@ -1428,6 +1461,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { "get-it", &[Value::Principal(contract_id_e3c3.clone().into())], |_, _| false, + None, ) .unwrap(); assert_eq!(Value::okay(Value::UInt(20)).unwrap(), res1.0); @@ -1440,6 +1474,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { "get-it", &[Value::Principal(contract_id_e3c3.clone().into())], |_, _| false, + None, ) .unwrap(); assert_eq!(Value::okay(Value::UInt(20)).unwrap(), res2.0); @@ -1496,6 +1531,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1523,6 +1559,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1555,6 +1592,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { &contract_e3c3, None, |_, _| false, + None, ) .unwrap(); }); @@ -1571,6 +1609,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { "get-it", &[Value::Principal(contract_id_e2c1.clone().into())], |_, _| false, + None, ) .unwrap(); assert_eq!(Value::okay(Value::UInt(777)).unwrap(), res1.0); @@ -1583,6 +1622,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { "get-it", &[Value::Principal(contract_id_e2c2.clone().into())], |_, _| false, + None, ) .unwrap(); 
assert_eq!(Value::okay(Value::UInt(777)).unwrap(), res2.0); @@ -1629,6 +1669,7 @@ fn test_block_heights_at_block() { contract, None, |_, _| false, + None ).unwrap(); }); @@ -1691,6 +1732,7 @@ fn test_get_block_info_time() { contract2, None, |_, _| false, + None, ) .unwrap(); @@ -1713,6 +1755,7 @@ fn test_get_block_info_time() { contract3, None, |_, _| false, + None, ) .unwrap(); @@ -1735,6 +1778,7 @@ fn test_get_block_info_time() { contract3_3, None, |_, _| false, + None, ) .unwrap(); }); diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index 6868d11b654..fe30fb6c116 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -24,8 +24,9 @@ use clarity::vm::contexts::{ use clarity::vm::contracts::Contract; use clarity::vm::costs::cost_functions::ClarityCostFunction; use clarity::vm::costs::{ - parse_cost, ClarityCostFunctionEvaluator, ClarityCostFunctionReference, CostErrors, - DefaultVersion, ExecutionCost, LimitedCostTracker, COSTS_1_NAME, COSTS_2_NAME, COSTS_3_NAME, + compute_cost, parse_cost, ClarityCostFunctionEvaluator, ClarityCostFunctionReference, + CostErrors, DefaultVersion, ExecutionCost, LimitedCostTracker, COSTS_1_NAME, COSTS_2_NAME, + COSTS_3_NAME, }; use clarity::vm::database::{ClarityDatabase, MemoryBackingStore}; use clarity::vm::errors::{CheckErrors, Error, RuntimeErrorType}; @@ -885,19 +886,16 @@ fn eval_cost_fn( let mainnet = owned_env.is_mainnet(); let boot_costs_id = boot_code_id(cost_contract_name, mainnet); let cost_fn_name = cost_fn.get_name_str(); - - let exec = format!("({cost_fn_name} u{argument})"); - - let exec_result = owned_env - .eval_read_only(&boot_costs_id, &exec) - .map(|(value, _, _)| Some(value)); - + let cost_tracker = owned_env.mut_cost_tracker(); + let data = match cost_tracker { + LimitedCostTracker::Free => panic!(), + LimitedCostTracker::Limited(data) => data, + }; let clarity_cost_fn_ref = ClarityCostFunctionReference { contract_id: boot_costs_id, function_name: cost_fn_name.to_string(), }; - - parse_cost(&clarity_cost_fn_ref.to_string(), exec_result) + compute_cost(data, clarity_cost_fn_ref, &[argument], data.epoch) } fn eval_replaced_cost_fn( @@ -926,7 +924,13 @@ fn proptest_cost_fn(cost_fn: &ClarityCostFunction, cost_contract_name: &str) { inputs.push(2u64.pow(i) + 1); }); for use_mainnet in [true, false] { - with_owned_env(StacksEpochId::latest(), use_mainnet, |mut owned_env| { + let epoch = match cost_contract_name { + COSTS_1_NAME => StacksEpochId::Epoch20, + COSTS_2_NAME => StacksEpochId::Epoch2_05, + COSTS_3_NAME => StacksEpochId::latest(), + _ => panic!(), + }; + with_owned_env(epoch, use_mainnet, |mut owned_env| { for i in inputs.iter() { eprintln!("Evaluating {cost_contract_name}.{cost_fn}({i})"); let clar_evaled = eval_cost_fn(&mut owned_env, cost_contract_name, cost_fn, *i); @@ -1180,6 +1184,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity contract_src, None, |_, _| false, + None, ) .unwrap(); tx.save_analysis(contract_name, &analysis).unwrap(); @@ -1464,6 +1469,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi contract_src, None, |_, _| false, + None, ) .unwrap(); tx.save_analysis(contract_name, &analysis).unwrap(); diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 7124ce571b5..404b6faef82 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs 
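The `proptest_cost_fn` change above stops evaluating every boot cost contract under `StacksEpochId::latest()` and instead pins each one to an epoch. Restated below for clarity; the rationale in the comments is an inference from the change (each contract is exercised under the epoch that shipped it), not stated in the patch:

```rust
// Condensed restatement of the epoch pinning in `proptest_cost_fn`.
fn epoch_for_cost_contract(name: &str) -> StacksEpochId {
    match name {
        COSTS_1_NAME => StacksEpochId::Epoch20,   // boot `costs` (Stacks 2.0)
        COSTS_2_NAME => StacksEpochId::Epoch2_05, // boot `costs-2` (Stacks 2.05)
        COSTS_3_NAME => StacksEpochId::latest(),  // boot `costs-3` (Stacks 2.1+)
        other => panic!("no epoch known for cost contract {other}"),
    }
}
```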
@@ -160,6 +160,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac BOOT_CODE_COSTS_2, None, |_, _| false, + None, ) .unwrap(); } @@ -185,6 +186,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac BOOT_CODE_COSTS_3, None, |_, _| false, + None, ) .unwrap(); } @@ -222,6 +224,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac tokens_contract, None, |_, _| false, + None, ) .unwrap() }); @@ -234,7 +237,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "token-transfer", &[p1.clone().into(), Value::UInt(210)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -247,7 +251,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "token-transfer", &[p2.clone().into(), Value::UInt(9000)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -261,7 +266,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "token-transfer", &[p2.clone().into(), Value::UInt(1001)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -269,7 +275,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac assert!(is_committed( & // send to self! block.as_transaction(|tx| tx.run_contract_call(&p1, None, &contract_identifier, "token-transfer", - &[p1.clone().into(), Value::UInt(1000)], |_, _| false)).unwrap().0 + &[p1.clone().into(), Value::UInt(1000)], |_, _| false, None)).unwrap().0 )); assert_eq!( @@ -299,7 +305,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -313,7 +320,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -327,7 +335,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -351,7 +360,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "mint-after", &[Value::UInt(25)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -388,7 +398,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "mint-after", &[Value::UInt(25)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -402,7 +413,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -425,7 +437,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "my-get-token-balance", &[p1.clone().into()], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -747,7 +760,8 @@ pub fn rollback_log_memory_test( &ct_ast, &contract, None, - |_, _| { false } + |_, _| { false }, + None ) .unwrap_err() ) @@ -823,7 +837,8 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id &ct_ast, &contract, None, - |_, _| { false } + |_, _| { false }, + None ) .unwrap_err() ) @@ -902,7 +917,8 @@ pub fn argument_memory_test( &ct_ast, &contract, None, - |_, _| { false } + |_, _| { false }, + None ) .unwrap_err() ) @@ -997,7 +1013,8 @@ pub fn fcall_memory_test(#[case] clarity_version: 
ClarityVersion, #[case] epoch_ &ct_ast, &contract_ok, None, - |_, _| true + |_, _| true, + None ) .unwrap_err() { @@ -1023,7 +1040,8 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &ct_ast, &contract_err, None, - |_, _| false + |_, _| false, + None ) .unwrap_err() ) @@ -1110,6 +1128,7 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -1133,7 +1152,8 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &ct_ast, &contract, None, - |_, _| false + |_, _| false, + None ) .unwrap_err() ) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 937c90ebdc8..25c718c7a18 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -46,7 +46,7 @@ use crate::chainstate::stacks::index::storage::TrieHashCalculationMode; use crate::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; use crate::chainstate::stacks::MAX_BLOCK_LEN; use crate::config::chain_data::MinerStats; -use crate::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; +use crate::core::mempool::{MemPoolWalkSettings, MemPoolWalkStrategy, MemPoolWalkTxTypes}; use crate::core::{ MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, @@ -1093,6 +1093,7 @@ impl Config { BlockBuilderSettings { max_miner_time_ms: miner_config.nakamoto_attempt_time_ms, mempool_settings: MemPoolWalkSettings { + strategy: miner_config.mempool_walk_strategy, max_walk_time_ms: miner_config.nakamoto_attempt_time_ms, consider_no_estimate_tx_prob: miner_config.probability_pick_no_estimate_tx, nonce_cache_size: miner_config.nonce_cache_size, @@ -1104,6 +1105,9 @@ impl Config { }, miner_status, confirm_microblocks: false, + max_execution_time: miner_config + .max_execution_time_secs + .map(Duration::from_secs), } } @@ -1136,6 +1140,7 @@ impl Config { // second or later attempt to mine a block -- give it some time miner_config.subsequent_attempt_time_ms }, + strategy: miner_config.mempool_walk_strategy, consider_no_estimate_tx_prob: miner_config.probability_pick_no_estimate_tx, nonce_cache_size: miner_config.nonce_cache_size, candidate_retry_cache_size: miner_config.candidate_retry_cache_size, @@ -1146,6 +1151,9 @@ impl Config { }, miner_status, confirm_microblocks: true, + max_execution_time: miner_config + .max_execution_time_secs + .map(Duration::from_secs), } } @@ -1197,7 +1205,7 @@ impl std::default::Default for Config { } } -#[derive(Clone, Debug, Default, Deserialize)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq)] pub struct BurnchainConfig { pub chain: String, pub mode: String, @@ -2113,6 +2121,8 @@ pub struct MinerConfig { pub microblock_attempt_time_ms: u64, /// Max time to assemble Nakamoto block pub nakamoto_attempt_time_ms: u64, + /// Strategy to follow when picking next mempool transactions to consider. + pub mempool_walk_strategy: MemPoolWalkStrategy, pub probability_pick_no_estimate_tx: u8, pub block_reward_recipient: Option, /// If possible, mine with a p2wpkh address @@ -2120,8 +2130,8 @@ pub struct MinerConfig { /// Wait for a downloader pass before mining. /// This can only be disabled in testing; it can't be changed in the config file. 
pub wait_for_block_download: bool, - pub nonce_cache_size: u64, - pub candidate_retry_cache_size: u64, + pub nonce_cache_size: usize, + pub candidate_retry_cache_size: usize, pub unprocessed_block_deadline_secs: u64, pub mining_key: Option<Secp256k1PrivateKey>, /// Amount of time while mining in nakamoto to wait in between mining interim blocks @@ -2177,6 +2187,8 @@ pub struct MinerConfig { pub tenure_extend_cost_threshold: u64, /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections pub block_rejection_timeout_steps: HashMap<u32, Duration>, + /// Define the max execution time for contract calls: transactions taking longer than the specified number of seconds will be rejected + pub max_execution_time_secs: Option<u64>, } impl Default for MinerConfig { @@ -2202,6 +2214,7 @@ impl Default for MinerConfig { activated_vrf_key_path: None, fast_rampup: false, underperform_stop_threshold: None, + mempool_walk_strategy: MemPoolWalkStrategy::GlobalFeeRate, txs_to_consider: MemPoolWalkTxTypes::all(), filter_origins: HashSet::new(), max_reorg_depth: 3, @@ -2226,6 +2239,7 @@ impl Default for MinerConfig { rejections_timeouts_default_map.insert(30, Duration::from_secs(0)); rejections_timeouts_default_map }, + max_execution_time_secs: None, } } } @@ -2596,11 +2610,12 @@ pub struct MinerConfigFile { pub subsequent_attempt_time_ms: Option<u64>, pub microblock_attempt_time_ms: Option<u64>, pub nakamoto_attempt_time_ms: Option<u64>, + pub mempool_walk_strategy: Option<String>, pub probability_pick_no_estimate_tx: Option<u8>, pub block_reward_recipient: Option<String>, pub segwit: Option<bool>, - pub nonce_cache_size: Option<u64>, - pub candidate_retry_cache_size: Option<u64>, + pub nonce_cache_size: Option<usize>, + pub candidate_retry_cache_size: Option<usize>, pub unprocessed_block_deadline_secs: Option<u64>, pub mining_key: Option<String>, pub wait_on_interim_blocks_ms: Option<u64>, @@ -2625,6 +2640,7 @@ pub struct MinerConfigFile { pub tenure_timeout_secs: Option<u64>, pub tenure_extend_cost_threshold: Option<u64>, pub block_rejection_timeout_steps: Option<HashMap<String, u64>>, + pub max_execution_time_secs: Option<u64>, } impl MinerConfigFile { @@ -2658,6 +2674,14 @@ impl MinerConfigFile { } else { miner_default_config.tenure_cost_limit_per_block_percentage }; + + let nonce_cache_size = self + .nonce_cache_size + .unwrap_or(miner_default_config.nonce_cache_size); + if nonce_cache_size == 0 { + return Err("miner.nonce_cache_size must be greater than 0".to_string()); + } + Ok(MinerConfig { first_attempt_time_ms: self .first_attempt_time_ms @@ -2717,6 +2741,9 @@ impl MinerConfigFile { activated_vrf_key_path: self.activated_vrf_key_path.clone(), fast_rampup: self.fast_rampup.unwrap_or(miner_default_config.fast_rampup), underperform_stop_threshold: self.underperform_stop_threshold, + mempool_walk_strategy: self.mempool_walk_strategy + .map(|s| str::parse(&s).unwrap_or_else(|e| panic!("Could not parse '{s}': {e}"))) + .unwrap_or(MemPoolWalkStrategy::GlobalFeeRate), txs_to_consider: { if let Some(txs_to_consider) = &self.txs_to_consider { txs_to_consider @@ -2786,7 +2813,9 @@ impl MinerConfigFile { } else { miner_default_config.block_rejection_timeout_steps } - } + }, + + max_execution_time_secs: self.max_execution_time_secs }) } } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index d21f46c3c1e..675a8ac14eb 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -15,21 +15,23 @@ // along with this program. If not, see <http://www.gnu.org/licenses/>.
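The `mempool_walk_strategy` config value is carried as a plain string in `MinerConfigFile` and parsed with the `FromStr` implementation for `MemPoolWalkStrategy` introduced in the mempool changes below. A minimal sketch of the expected parsing behavior, assuming the enum as defined in this patch:

```rust
use std::str::FromStr;

// Only the two exact variant spellings are accepted.
assert_eq!(
    MemPoolWalkStrategy::from_str("NextNonceWithHighestFeeRate"),
    Ok(MemPoolWalkStrategy::NextNonceWithHighestFeeRate)
);
// Unknown names are rejected (the config layer turns this into a
// startup panic via `unwrap_or_else`).
assert!(MemPoolWalkStrategy::from_str("next-nonce").is_err());
// When the setting is absent, both the config layer and
// `MemPoolWalkSettings::default()` fall back to `GlobalFeeRate`.
assert_eq!(
    MemPoolWalkSettings::default().strategy,
    MemPoolWalkStrategy::GlobalFeeRate
);
```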
use std::cmp::{self, Ordering}; -use std::collections::{HashMap, HashSet, VecDeque}; +use std::collections::{HashMap, HashSet, LinkedList, VecDeque}; use std::hash::Hasher; use std::io::{Read, Write}; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::{Duration, Instant, SystemTime}; -use std::{fs, io}; +use std::{fs, io, thread}; use clarity::vm::types::PrincipalData; use rand::distributions::Uniform; use rand::prelude::Distribution; +use rand::Rng; use rusqlite::types::ToSql; use rusqlite::{ - params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Transaction, + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Statement, + Transaction, }; use siphasher::sip::SipHasher; // this is SipHash-2-4 use stacks_common::codec::{ @@ -55,6 +57,7 @@ use crate::chainstate::stacks::{ Error as ChainstateError, StacksBlock, StacksMicroblock, StacksTransaction, TransactionPayload, }; use crate::clarity_vm::clarity::ClarityConnection; +use crate::core::nonce_cache::NonceCache; use crate::core::{ ExecutionCost, StacksEpochId, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, }; @@ -99,6 +102,9 @@ pub const DEFAULT_BLACKLIST_MAX_SIZE: u64 = 134217728; // 2**27 -- the blacklist // loading the bloom filter, even though the bloom filter is larger. const DEFAULT_MAX_TX_TAGS: u32 = 2048; +// maximum number of transactions that can fit in a single block +const MAX_BLOCK_TXS: usize = 11_650; + /// A node-specific transaction tag -- the first 8 bytes of siphash(local-seed,txid) #[derive(Debug, Clone, PartialEq, Hash, Eq)] pub struct TxTag(pub [u8; 8]); @@ -144,8 +150,11 @@ pub enum MemPoolSyncData { TxTags([u8; 32], Vec<TxTag>), } +#[derive(Debug, PartialEq)] pub enum MempoolIterationStopReason { + /// No more candidates in the mempool to consider NoMoreCandidates, + /// The mining deadline has been reached DeadlineReached, /// If the iteration function supplied to mempool iteration exited /// (i.e., the transaction evaluator returned an early exit command) @@ -514,20 +523,46 @@ impl MemPoolWalkTxTypes { } } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum MemPoolWalkStrategy { + /// Select transactions with the highest global fee rate. + GlobalFeeRate, + /// Select transactions with the next expected nonce for origin and sponsor addresses. + NextNonceWithHighestFeeRate, +} + +impl FromStr for MemPoolWalkStrategy { + type Err = &'static str; + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "GlobalFeeRate" => { + return Ok(Self::GlobalFeeRate); + } + "NextNonceWithHighestFeeRate" => { + return Ok(Self::NextNonceWithHighestFeeRate); + } + _ => { + return Err("Unknown mempool walk strategy"); + } + } + } +} + #[derive(Debug, Clone)] pub struct MemPoolWalkSettings { + /// Strategy to use when selecting the next transactions to consider in the `mempool` table. + pub strategy: MemPoolWalkStrategy, /// Maximum amount of time a miner will spend walking through mempool transactions, in /// milliseconds. This is a soft deadline. pub max_walk_time_ms: u64, /// Probability percentage to consider a transaction which has not received a cost estimate. - /// That is, with x%, when picking the next transaction to include a block, select one that - /// either failed to get a cost estimate or has not been estimated yet. + /// Only used when walk strategy is `GlobalFeeRate`. pub consider_no_estimate_tx_prob: u8, /// Size of the nonce cache. This avoids MARF look-ups.
- pub nonce_cache_size: u64, + pub nonce_cache_size: usize, /// Size of the candidate cache. These are the candidates that will be retried after each /// transaction is mined. - pub candidate_retry_cache_size: u64, + pub candidate_retry_cache_size: usize, /// Types of transactions we'll consider pub txs_to_consider: HashSet, /// Origins for transactions that we'll consider @@ -540,6 +575,7 @@ pub struct MemPoolWalkSettings { impl Default for MemPoolWalkSettings { fn default() -> Self { MemPoolWalkSettings { + strategy: MemPoolWalkStrategy::GlobalFeeRate, max_walk_time_ms: u64::MAX, consider_no_estimate_tx_prob: 5, nonce_cache_size: 1024 * 1024, @@ -553,6 +589,7 @@ impl Default for MemPoolWalkSettings { impl MemPoolWalkSettings { pub fn zero() -> MemPoolWalkSettings { MemPoolWalkSettings { + strategy: MemPoolWalkStrategy::GlobalFeeRate, max_walk_time_ms: u64::MAX, consider_no_estimate_tx_prob: 5, nonce_cache_size: 1024 * 1024, @@ -803,6 +840,29 @@ const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &[&str] = &[ "#, ]; +const MEMPOOL_SCHEMA_8_NONCE_SORTING: &'static [&'static str] = &[ + r#" + -- Add table to track considered transactions + CREATE TABLE IF NOT EXISTS considered_txs( + txid TEXT PRIMARY KEY NOT NULL, + FOREIGN KEY(txid) REFERENCES mempool(txid) ON DELETE CASCADE + ); + "#, + r#" + -- Drop redundant mempool indexes, covered by unique constraints + DROP INDEX IF EXISTS "by_txid"; + DROP INDEX IF EXISTS "by_sponsor"; + DROP INDEX IF EXISTS "by_origin"; + "#, + r#" + -- Add indexes for nonce sorting + CREATE INDEX IF NOT EXISTS by_address_nonce ON nonces(address, nonce); + "#, + r#" + INSERT INTO schema_version (version) VALUES (8) + "#, +]; + const MEMPOOL_INDEXES: &[&str] = &[ "CREATE INDEX IF NOT EXISTS by_txid ON mempool(txid);", "CREATE INDEX IF NOT EXISTS by_height ON mempool(height);", @@ -997,126 +1057,7 @@ impl<'a> MemPoolTx<'a> { } } -/// Used to locally cache nonces to avoid repeatedly looking them up in the nonce. -struct NonceCache { - cache: HashMap, - /// The maximum size that this cache can be. - max_cache_size: usize, -} - -impl NonceCache { - fn new(nonce_cache_size: u64) -> Self { - let max_size: usize = nonce_cache_size - .try_into() - .expect("Could not cast `nonce_cache_size` as `usize`."); - Self { - cache: HashMap::new(), - max_cache_size: max_size, - } - } - - /// Get a nonce from the cache. - /// First, the RAM cache will be checked for this address. - /// If absent, then the `nonces` table will be queried for this address. - /// If absent, then the MARF will be queried for this address. - /// - /// If not in RAM, the nonce will be opportunistically stored to the `nonces` table. If that - /// fails due to lock contention, then the method will return `true` for its second tuple argument. - /// - /// Returns (nonce, should-try-store-again?) 
- fn get( - &mut self, - address: &StacksAddress, - clarity_tx: &mut C, - mempool_db: &DBConn, - ) -> (u64, bool) - where - C: ClarityConnection, - { - #[cfg(test)] - assert!(self.cache.len() <= self.max_cache_size); - - // Check in-memory cache - match self.cache.get(address) { - Some(nonce) => (*nonce, false), - None => { - // Check sqlite cache - let opt_nonce = match db_get_nonce(mempool_db, address) { - Ok(opt_nonce) => opt_nonce, - Err(e) => { - warn!("error retrieving nonce from mempool db: {}", e); - None - } - }; - match opt_nonce { - Some(nonce) => { - // Copy this into the in-memory cache if there is space - if self.cache.len() < self.max_cache_size { - self.cache.insert(address.clone(), nonce); - } - (nonce, false) - } - None => { - let nonce = - StacksChainState::get_nonce(clarity_tx, &address.clone().into()); - - let should_store_again = match db_set_nonce(mempool_db, address, nonce) { - Ok(_) => false, - Err(e) => { - debug!("error caching nonce to sqlite: {}", e); - true - } - }; - - if self.cache.len() < self.max_cache_size { - self.cache.insert(address.clone(), nonce); - } - (nonce, should_store_again) - } - } - } - } - } - - /// Store the (address, nonce) pair to the `nonces` table. - /// If storage fails, return false. - /// Otherwise return true. - fn update(&mut self, address: StacksAddress, value: u64, mempool_db: &DBConn) -> bool { - // Sqlite cache - let success = match db_set_nonce(mempool_db, &address, value) { - Ok(_) => true, - Err(e) => { - warn!("error caching nonce to sqlite: {}", e); - false - } - }; - - // In-memory cache - if let Some(nonce) = self.cache.get_mut(&address) { - *nonce = value; - } - - success - } -} - -fn db_set_nonce(conn: &DBConn, address: &StacksAddress, nonce: u64) -> Result<(), db_error> { - let addr_str = address.to_string(); - let nonce_i64 = u64_to_sql(nonce)?; - - let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; - conn.execute(sql, params![addr_str, nonce_i64])?; - Ok(()) -} - -fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, db_error> { - let addr_str = address.to_string(); - - let sql = "SELECT nonce FROM nonces WHERE address = ?"; - query_row(conn, sql, params![addr_str]) -} - -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM nonces"; let mut stmt = conn.prepare(sql).map_err(db_error::SqliteError)?; @@ -1143,7 +1084,7 @@ struct CandidateCache { } impl CandidateCache { - fn new(candidate_retry_cache_size: u64) -> Self { + fn new(candidate_retry_cache_size: usize) -> Self { let max_size: usize = candidate_retry_cache_size .try_into() .expect("Could not cast `candidate_retry_cache_size` as usize."); @@ -1291,6 +1232,9 @@ impl MemPoolDB { MemPoolDB::instantiate_schema_7(tx)?; } 7 => { + MemPoolDB::instantiate_schema_8(tx)?; + } + 8 => { break; } _ => { @@ -1377,6 +1321,16 @@ impl MemPoolDB { Ok(()) } + /// Optimize indexes for mempool visits + #[cfg_attr(test, mutants::skip)] + fn instantiate_schema_8(tx: &DBTx) -> Result<(), db_error> { + for sql_exec in MEMPOOL_SCHEMA_8_NONCE_SORTING { + tx.execute_batch(sql_exec)?; + } + + Ok(()) + } + #[cfg_attr(test, mutants::skip)] pub fn db_path(chainstate_root_path: &str) -> Result { let mut path = PathBuf::from(chainstate_root_path); @@ -1486,10 +1440,12 @@ impl MemPoolDB { } #[cfg_attr(test, mutants::skip)] - pub fn reset_nonce_cache(&mut self) -> Result<(), db_error> { + pub fn reset_mempool_caches(&mut self) -> Result<(), db_error> { debug!("reset nonce 
cache"); - let sql = "DELETE FROM nonces"; - self.db.execute(sql, NO_PARAMS)?; + // Delete all rows from the nonces table + self.db.execute("DELETE FROM nonces", NO_PARAMS)?; + // Also delete all rows from the considered_txs table + self.db.execute("DELETE FROM considered_txs", NO_PARAMS)?; Ok(()) } @@ -1626,28 +1582,29 @@ impl MemPoolDB { { let start_time = Instant::now(); let mut total_considered = 0; + let mut considered_txs = Vec::with_capacity(MAX_BLOCK_TXS); debug!("Mempool walk for {}ms", settings.max_walk_time_ms,); + let mut nonce_cache = NonceCache::new(settings.nonce_cache_size); + let mut nonce_conn = self.reopen(true)?; + + // == Queries for `GlobalFeeRate` mempool walk strategy + // + // Selects mempool transactions only based on their fee rate. Transactions with NULL fee rates get randomly selected for + // consideration. let tx_consideration_sampler = Uniform::new(0, 100); let mut rng = rand::thread_rng(); let mut candidate_cache = CandidateCache::new(settings.candidate_retry_cache_size); - let mut nonce_cache = NonceCache::new(settings.nonce_cache_size); - - // set of (address, nonce) to store after the inner loop completes. This will be done in a - // single transaction. This cannot grow to more than `settings.nonce_cache_size` entries. - let mut retry_store = HashMap::new(); - let sql = " - SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate - FROM mempool - WHERE fee_rate IS NULL - "; + SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate + FROM mempool + WHERE fee_rate IS NULL + "; let mut query_stmt_null = self.db.prepare(sql).map_err(Error::SqliteError)?; let mut null_iterator = query_stmt_null .query(NO_PARAMS) .map_err(Error::SqliteError)?; - let sql = " SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate FROM mempool @@ -1659,173 +1616,245 @@ impl MemPoolDB { .query(NO_PARAMS) .map_err(Error::SqliteError)?; + // Here we have a nested loop to walk the mempool. + // + // The `GlobalFeeRate` strategy includes all transactions, so we just + // query once and walk the full mempool in the inner loop. + // + // The `NextNonceWithHighestFeeRate` strategy only selects transactions + // that have the next expected nonce, so we need to re-query the + // mempool after one batch has been processed and the nonce table has + // been updated. This is handled in the outer loop. let stop_reason = loop { - if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { - debug!("Mempool iteration deadline exceeded"; + let mut state_changed = false; + + // == Query for `NextNonceWithHighestFeeRate` mempool walk strategy + // + // Selects the next mempool transaction to consider using a heuristic that maximizes miner fee profitability and minimizes + // CPU time wasted on already-mined or not-yet-mineable transactions. This heuristic takes the following steps: + // + // 1. Filters out transactions to consider only those that have the next expected nonce for both the origin and sponsor, + // when possible + // 2. Adds a "simulated" fee rate to transactions that don't have it by multiplying the mempool's maximum current fee rate + // by a random number. This helps us mix these transactions with others to guarantee they get processed in a reasonable + // order + // 3. Ranks transactions by prioritizing those with next nonces and higher fees (per origin and sponsor address) + // 4. 
Takes the top ranked transaction and returns it for evaluation + // + // This logic prevents miners from repeatedly visiting (and then skipping) high fee transactions that would get evaluated + // first based on their `fee_rate` but are otherwise non-mineable because they have very high or invalid nonces. A large + // volume of these transactions would cause considerable slowness when selecting valid transactions to mine. This query + // also makes sure transactions that have NULL `fee_rate`s are visited, because they will also get ranked according to + // their origin address nonce. + let sql = " + WITH nonce_filtered AS ( + SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate, + CASE + WHEN fee_rate IS NULL THEN (ABS(RANDOM()) % 10000 / 10000.0) * (SELECT MAX(fee_rate) FROM mempool) + ELSE fee_rate + END AS sort_fee_rate + FROM mempool AS m + LEFT JOIN nonces AS no ON m.origin_address = no.address + LEFT JOIN nonces AS ns ON m.sponsor_address = ns.address + WHERE (no.address IS NULL OR m.origin_nonce = no.nonce) + AND (ns.address IS NULL OR m.sponsor_nonce = ns.nonce) + AND m.txid NOT IN (SELECT txid FROM considered_txs) + ORDER BY accept_time ASC + LIMIT 11650 -- max transactions that can fit in one block + ), + address_nonce_ranked AS ( + SELECT *, + ROW_NUMBER() OVER ( + PARTITION BY origin_address + ORDER BY origin_nonce ASC, sort_fee_rate DESC + ) AS origin_rank, + ROW_NUMBER() OVER ( + PARTITION BY sponsor_address + ORDER BY sponsor_nonce ASC, sort_fee_rate DESC + ) AS sponsor_rank + FROM nonce_filtered + ) + SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate + FROM address_nonce_ranked + ORDER BY origin_rank ASC, sponsor_rank ASC, sort_fee_rate DESC + "; + let mut query_stmt_nonce_rank = self.db.prepare(sql).map_err(Error::SqliteError)?; + let mut nonce_rank_iterator = query_stmt_nonce_rank + .query(NO_PARAMS) + .map_err(Error::SqliteError)?; + + let stop_reason = loop { + if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { + debug!("Mempool: iteration deadline exceeded"; "deadline_ms" => settings.max_walk_time_ms); - break MempoolIterationStopReason::DeadlineReached; - } - - let start_with_no_estimate = - tx_consideration_sampler.sample(&mut rng) < settings.consider_no_estimate_tx_prob; - - // First, try to read from the retry list - let (candidate, update_estimate) = match candidate_cache.next() { - Some(tx) => { - let update_estimate = tx.fee_rate.is_none(); - (tx, update_estimate) + break MempoolIterationStopReason::DeadlineReached; } - None => { - // When the retry list is empty, read from the mempool db, - // randomly selecting from either the null fee-rate transactions - // or those with fee-rate estimates. - let opt_tx = if start_with_no_estimate { - null_iterator.next().map_err(Error::SqliteError)? - } else { - fee_iterator.next().map_err(Error::SqliteError)? - }; - match opt_tx { - Some(row) => (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate), - None => { - // If the selected iterator is empty, check the other - match if start_with_no_estimate { - fee_iterator.next().map_err(Error::SqliteError)? - } else { - null_iterator.next().map_err(Error::SqliteError)? 
- } { - Some(row) => ( - MemPoolTxInfoPartial::from_row(row)?, - !start_with_no_estimate, - ), - None => { - debug!("No more transactions to consider in mempool"); - break MempoolIterationStopReason::NoMoreCandidates; + + // First, try to read from the retry list + let (candidate, update_estimate) = match settings.strategy { + MemPoolWalkStrategy::GlobalFeeRate => { + // First, try to read from the retry list + match candidate_cache.next() { + Some(tx) => { + let update_estimate = tx.fee_rate.is_none(); + (tx, update_estimate) + } + None => { + // When the retry list is empty, read from the mempool db, + // randomly selecting from either the null fee-rate transactions + // or those with fee-rate estimates. + let start_with_no_estimate = tx_consideration_sampler + .sample(&mut rng) + < settings.consider_no_estimate_tx_prob; + let opt_tx = if start_with_no_estimate { + null_iterator.next().map_err(Error::SqliteError)? + } else { + fee_iterator.next().map_err(Error::SqliteError)? + }; + match opt_tx { + Some(row) => ( + MemPoolTxInfoPartial::from_row(row)?, + start_with_no_estimate, + ), + None => { + // If the selected iterator is empty, check the other + match if start_with_no_estimate { + fee_iterator.next().map_err(Error::SqliteError)? + } else { + null_iterator.next().map_err(Error::SqliteError)? + } { + Some(row) => ( + MemPoolTxInfoPartial::from_row(row)?, + !start_with_no_estimate, + ), + None => { + break MempoolIterationStopReason::NoMoreCandidates; + } + } + } } } } } - } - }; + MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { + match nonce_rank_iterator.next().map_err(Error::SqliteError)? { + Some(row) => { + let tx = MemPoolTxInfoPartial::from_row(row)?; + let update_estimate = tx.fee_rate.is_none(); + (tx, update_estimate) + } + None => { + break MempoolIterationStopReason::NoMoreCandidates; + } + } + } + }; - // Check the nonces. - let (expected_origin_nonce, retry_store_origin_nonce) = - nonce_cache.get(&candidate.origin_address, clarity_tx, self.conn()); - let (expected_sponsor_nonce, retry_store_sponsor_nonce) = - nonce_cache.get(&candidate.sponsor_address, clarity_tx, self.conn()); - - // Try storing these nonces later if we failed to do so here, e.g. due to some other - // thread holding the write-lock on the mempool DB. - if retry_store_origin_nonce { - Self::save_nonce_for_retry( - &mut retry_store, - settings.nonce_cache_size, - candidate.origin_address.clone(), + state_changed = true; + + // Check the nonces. 
+ let expected_origin_nonce = + nonce_cache.get(&candidate.origin_address, clarity_tx, &mut nonce_conn); + let expected_sponsor_nonce = + nonce_cache.get(&candidate.sponsor_address, clarity_tx, &mut nonce_conn); + + match order_nonces( + candidate.origin_nonce, expected_origin_nonce, - ); - } - if retry_store_sponsor_nonce { - Self::save_nonce_for_retry( - &mut retry_store, - settings.nonce_cache_size, - candidate.sponsor_address.clone(), + candidate.sponsor_nonce, expected_sponsor_nonce, - ); - } - - match order_nonces( - candidate.origin_nonce, - expected_origin_nonce, - candidate.sponsor_nonce, - expected_sponsor_nonce, - ) { - Ordering::Less => { - debug!( - "Mempool: unexecutable: drop tx"; - "txid" => %candidate.txid, - "tx_origin_addr" => %candidate.origin_address, - "tx_origin_nonce" => candidate.origin_nonce, - "fee_rate" => candidate.fee_rate.unwrap_or_default(), - "expected_origin_nonce" => expected_origin_nonce, - "expected_sponsor_nonce" => expected_sponsor_nonce, - ); - // This transaction cannot execute in this pass, just drop it - continue; - } - Ordering::Greater => { - debug!( - "Mempool: nonces too high, cached for later"; - "txid" => %candidate.txid, - "tx_origin_addr" => %candidate.origin_address, - "tx_origin_nonce" => candidate.origin_nonce, - "fee_rate" => candidate.fee_rate.unwrap_or_default(), - "expected_origin_nonce" => expected_origin_nonce, - "expected_sponsor_nonce" => expected_sponsor_nonce, - ); - // This transaction could become runnable in this pass, save it for later - candidate_cache.push(candidate); - continue; - } - Ordering::Equal => { - // Candidate transaction: fall through - } - }; + ) { + Ordering::Less => { + debug!( + "Mempool: unexecutable: drop tx"; + "txid" => %candidate.txid, + "tx_origin_addr" => %candidate.origin_address, + "tx_origin_nonce" => candidate.origin_nonce, + "fee_rate" => candidate.fee_rate.unwrap_or_default(), + "expected_origin_nonce" => expected_origin_nonce, + "expected_sponsor_nonce" => expected_sponsor_nonce, + ); + // This transaction cannot execute in this pass, just drop it + continue; + } + Ordering::Greater => { + debug!( + "Mempool: nonces too high"; + "txid" => %candidate.txid, + "tx_origin_addr" => %candidate.origin_address, + "tx_origin_nonce" => candidate.origin_nonce, + "fee_rate" => candidate.fee_rate.unwrap_or_default(), + "expected_origin_nonce" => expected_origin_nonce, + "expected_sponsor_nonce" => expected_sponsor_nonce, + ); + if settings.strategy == MemPoolWalkStrategy::GlobalFeeRate { + // This transaction could become runnable in this pass, save it for later + candidate_cache.push(candidate); + } + continue; + } + Ordering::Equal => { + // Candidate transaction: fall through + } + }; + considered_txs.push(candidate.txid); - // Read in and deserialize the transaction. - let tx_info_option = MemPoolDB::get_tx(self.conn(), &candidate.txid)?; - let tx_info = match tx_info_option { - Some(tx) => tx, - None => { - // Note: Don't panic here because maybe the state has changed from garbage collection. - warn!("Miner: could not find a tx for id {:?}", &candidate.txid); - continue; - } - }; + // Read in and deserialize the transaction. + let tx_info_option = MemPoolDB::get_tx(self.conn(), &candidate.txid)?; + let tx_info = match tx_info_option { + Some(tx) => tx, + None => { + // Note: Don't panic here because maybe the state has changed from garbage collection. 
+ warn!("Miner: could not find a tx for id {:?}", &candidate.txid); + continue; + } + }; - let (tx_type, do_consider) = match &tx_info.tx.payload { - TransactionPayload::TokenTransfer(..) => ( - "TokenTransfer".to_string(), - settings - .txs_to_consider - .contains(&MemPoolWalkTxTypes::TokenTransfer), - ), - TransactionPayload::SmartContract(..) => ( - "SmartContract".to_string(), - settings - .txs_to_consider - .contains(&MemPoolWalkTxTypes::SmartContract), - ), - TransactionPayload::ContractCall(..) => ( - "ContractCall".to_string(), - settings - .txs_to_consider - .contains(&MemPoolWalkTxTypes::ContractCall), - ), - _ => ("".to_string(), true), - }; - if !do_consider { - debug!("Will skip mempool tx, since it does not have an acceptable type"; + let (tx_type, do_consider) = match &tx_info.tx.payload { + TransactionPayload::TokenTransfer(..) => ( + "TokenTransfer".to_string(), + settings + .txs_to_consider + .contains(&MemPoolWalkTxTypes::TokenTransfer), + ), + TransactionPayload::SmartContract(..) => ( + "SmartContract".to_string(), + settings + .txs_to_consider + .contains(&MemPoolWalkTxTypes::SmartContract), + ), + TransactionPayload::ContractCall(..) => ( + "ContractCall".to_string(), + settings + .txs_to_consider + .contains(&MemPoolWalkTxTypes::ContractCall), + ), + _ => ("".to_string(), true), + }; + if !do_consider { + debug!("Mempool: will skip tx, since it does not have an acceptable type"; "txid" => %tx_info.tx.txid(), "type" => %tx_type); - continue; - } + continue; + } - let do_consider = settings.filter_origins.is_empty() - || settings - .filter_origins - .contains(&tx_info.metadata.origin_address); + let do_consider = settings.filter_origins.is_empty() + || settings + .filter_origins + .contains(&tx_info.metadata.origin_address); - if !do_consider { - debug!("Will skip mempool tx, since it does not have an allowed origin"; + if !do_consider { + debug!("Mempool: will skip tx, since it does not have an allowed origin"; "txid" => %tx_info.tx.txid(), "origin" => %tx_info.metadata.origin_address); - continue; - } + continue; + } - let consider = ConsiderTransaction { - tx: tx_info, - update_estimate, - }; - debug!("Consider mempool transaction"; + let consider = ConsiderTransaction { + tx: tx_info, + update_estimate, + }; + debug!("Mempool: consider transaction"; "txid" => %consider.tx.tx.txid(), "origin_addr" => %consider.tx.metadata.origin_address, "origin_nonce" => candidate.origin_nonce, @@ -1835,87 +1864,93 @@ impl MemPoolDB { "tx_fee" => consider.tx.metadata.tx_fee, "fee_rate" => candidate.fee_rate, "size" => consider.tx.metadata.len); - total_considered += 1; - - // Run `todo` on the transaction. - match todo(clarity_tx, &consider, self.cost_estimator.as_mut())? { - Some(tx_event) => { - match tx_event { - TransactionEvent::Success(_) => { - // Bump nonces in the cache for the executed transaction - let stored = nonce_cache.update( - consider.tx.metadata.origin_address, - expected_origin_nonce + 1, - self.conn(), - ); - if !stored { - Self::save_nonce_for_retry( - &mut retry_store, - settings.nonce_cache_size, + total_considered += 1; + + // Run `todo` on the transaction. + match todo(clarity_tx, &consider, self.cost_estimator.as_mut())? 
{ + Some(tx_event) => { + match tx_event { + TransactionEvent::Success(_) => { + // Bump nonces in the cache for the executed transaction + nonce_cache.set( consider.tx.metadata.origin_address, expected_origin_nonce + 1, + &mut nonce_conn, ); - } - - if consider.tx.tx.auth.is_sponsored() { - let stored = nonce_cache.update( - consider.tx.metadata.sponsor_address, - expected_sponsor_nonce + 1, - self.conn(), - ); - if !stored { - Self::save_nonce_for_retry( - &mut retry_store, - settings.nonce_cache_size, + if consider.tx.tx.auth.is_sponsored() { + nonce_cache.set( consider.tx.metadata.sponsor_address, expected_sponsor_nonce + 1, + &mut nonce_conn, ); } + output_events.push(tx_event); + } + TransactionEvent::Skipped(_) => { + // don't push `Skipped` events to the observer + } + _ => { + output_events.push(tx_event); } - output_events.push(tx_event); - } - TransactionEvent::Skipped(_) => { - // don't push `Skipped` events to the observer - } - _ => { - output_events.push(tx_event); } } } + None => { + debug!("Mempool: early exit from iterator"); + break MempoolIterationStopReason::IteratorExited; + } + } + + if settings.strategy == MemPoolWalkStrategy::GlobalFeeRate { + // Reset for finding the next transaction to process + debug!( + "Mempool: reset: retry list has {} entries", + candidate_cache.len() + ); + candidate_cache.reset(); } - None => { - debug!("Mempool iteration early exit from iterator"); - break MempoolIterationStopReason::IteratorExited; + }; + + // If we've reached the end of the mempool, or if we've stopped + // iterating for some other reason, break out of the loop. In the + // case of `NextNonceWithHighestFeeRate` we know we've reached the + // end of the mempool if the state has not changed. In the case of + // `GlobalFeeRate` we know we've reached the end of the mempool if + // the stop reason is `NoMoreCandidates`. + if settings.strategy != MemPoolWalkStrategy::NextNonceWithHighestFeeRate + || stop_reason != MempoolIterationStopReason::NoMoreCandidates + || !state_changed + { + if stop_reason == MempoolIterationStopReason::NoMoreCandidates { + debug!("Mempool: no more transactions to consider"); } + break stop_reason; } - // Reset for finding the next transaction to process - debug!( - "Mempool: reset: retry list has {} entries", - candidate_cache.len() - ); - candidate_cache.reset(); + // Flush the nonce cache to the database before performing the next + // query. + nonce_cache.flush(&mut nonce_conn); + + // Flush the IDs of considered transactions to the database before + // performing the next query. + flush_considered_txs(&mut nonce_conn, &mut considered_txs); };
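+ // Summary of the exit logic above (illustrative, derived from the code):
+ // under GlobalFeeRate the outer loop always breaks with whatever stop reason
+ // the inner loop produced; under NextNonceWithHighestFeeRate, the caches are
+ // flushed and the query re-run only when the iterator ran dry (NoMoreCandidates)
+ // while this pass still made progress (state_changed), since newly written
+ // nonces can surface fresh candidates.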
// drop these rusqlite statements and queries, since their existence as immutable borrows on the // connection prevents us from beginning a transaction below (which requires a mutable // borrow). drop(null_iterator); - drop(fee_iterator); drop(query_stmt_null); + drop(fee_iterator); drop(query_stmt_fee); - if !retry_store.is_empty() { - let tx = self.tx_begin()?; - for (address, nonce) in retry_store.into_iter() { - nonce_cache.update(address, nonce, &tx); - } - tx.commit()?; - } + // Write through the nonce cache to the database + nonce_cache.flush(&mut self.db); - debug!( + info!( "Mempool iteration finished"; "considered_txs" => u128::from(total_considered), - "elapsed_ms" => start_time.elapsed().as_millis() + "elapsed_ms" => start_time.elapsed().as_millis(), + "stop_reason" => ?stop_reason ); Ok((total_considered, stop_reason)) } @@ -2879,3 +2914,48 @@ impl MemPoolDB { Ok((ret, next_page, num_rows_visited)) } } + +/// Flush the considered transaction IDs to the DB. +/// Do not return until successful. After a successful flush, clear the vector. +pub fn flush_considered_txs(conn: &mut DBConn, considered_txs: &mut Vec<Txid>) { + const MAX_BACKOFF: Duration = Duration::from_secs(30); + let mut backoff = Duration::from_millis(rand::thread_rng().gen_range(50..200)); + + loop { + // Pass a slice to the try function. + let result = try_flush_considered_txs(conn, considered_txs.as_slice()); + + match result { + Ok(_) => { + // On success, clear the vector so that it's empty. + considered_txs.clear(); + return; + } + Err(e) => { + warn!("Considered txid flush failed: {e}. Retrying in {backoff:?}"); + thread::sleep(backoff); + if backoff < MAX_BACKOFF { + backoff = + backoff * 2 + Duration::from_millis(rand::thread_rng().gen_range(50..200)); + } + } + } + } +} + +/// Try to flush the considered transaction IDs to the DB. +pub fn try_flush_considered_txs( + conn: &mut DBConn, + considered_txs: &[Txid], +) -> Result<(), db_error> { + let sql = "INSERT OR IGNORE INTO considered_txs (txid) VALUES (?1)"; + + let db_tx = conn.transaction()?; + + for txid in considered_txs { + db_tx.execute(sql, params![txid])?; + } + + db_tx.commit()?; + Ok(()) +}
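+
+// Usage sketch (illustrative only, not part of the patch): the mempool walk
+// accumulates txids and flushes them in between queries, e.g.:
+//
+//     let mut considered_txs: Vec<Txid> = Vec::new();
+//     considered_txs.push(candidate.txid);
+//     // ... later, between mempool queries ...
+//     flush_considered_txs(&mut nonce_conn, &mut considered_txs);
+//
+// Because `flush_considered_txs` retries with jittered exponential backoff until
+// the write succeeds, callers may assume the vector is empty once it returns.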
diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 899f9d4a2fa..30e042e510b 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -30,7 +30,10 @@ use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; +pub mod nonce_cache; +#[cfg(any(test, feature = "testing"))] +pub mod test_util; #[cfg(test)] pub mod tests; diff --git a/stackslib/src/core/nonce_cache.rs b/stackslib/src/core/nonce_cache.rs new file mode 100644 index 00000000000..a3fad4f843d --- /dev/null +++ b/stackslib/src/core/nonce_cache.rs @@ -0,0 +1,359 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +use std::collections::HashMap; +use std::thread; +use std::time::Duration; + +use clarity::types::chainstate::StacksAddress; +use clarity::util::lru_cache::{FlushError, LruCache, LruCacheCorrupted}; +use clarity::vm::clarity::ClarityConnection; +use rand::Rng; +use rusqlite::params; + +use super::mempool::MemPoolTx; +use super::MemPoolDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::util_lib::db::{query_row, u64_to_sql, DBConn, Error as db_error}; + +/// Used to cache nonces in memory and in the mempool database. +/// 1. MARF - source of truth for nonces +/// 2. Nonce DB - table in mempool sqlite database +/// 3. LRU cache - in-memory cache of nonces +/// The in-memory cache is restricted to a maximum size to avoid memory +/// exhaustion. When the cache is full, it should be flushed to the database +/// and cleared. It is recommended to do this in between batches of candidate +/// transactions from the mempool. +pub struct NonceCache { + /// In-memory LRU cache of nonces. + cache: LruCache<StacksAddress, u64>, + max_size: usize, +} + +impl NonceCache { + pub fn new(max_size: usize) -> Self { + Self { + cache: LruCache::new(max_size), + max_size, + } + } + + /// Reset the cache to an empty state and clear the nonce DB. + /// This should only be called when the cache is corrupted. + fn reset_cache(&mut self, conn: &mut DBConn) { + self.cache = LruCache::new(self.max_size); + if let Err(e) = conn.execute("DELETE FROM nonces", []) { + warn!("error clearing nonces table: {e}"); + } + } + + /// Get a nonce. + /// First, the RAM cache will be checked for this address. + /// If absent, then the `nonces` table will be queried for this address. + /// If absent, then the MARF will be queried for this address. + /// + /// If not already cached in RAM, the nonce is opportunistically written back + /// to the `nonces` table and the RAM cache on the way out. + /// + /// Returns the expected next nonce for `address`. + pub fn get<C>( + &mut self, + address: &StacksAddress, + clarity_tx: &mut C, + mempool_db: &mut DBConn, + ) -> u64 + where + C: ClarityConnection, + { + // Check in-memory cache + match self.cache.get(address) { + Ok(Some(nonce)) => return nonce, + Ok(None) => {} + Err(_) => { + // The cache is corrupt, reset it + self.reset_cache(mempool_db); + } + } + + // Check sqlite cache + let db_nonce_opt = db_get_nonce(mempool_db, address).unwrap_or_else(|e| { + warn!("error retrieving nonce from mempool db: {e}"); + None + }); + if let Some(db_nonce) = db_nonce_opt { + // Insert into in-memory cache, but it is not dirty, + // since we just got it from the database. + let evicted = match self.cache.insert_clean(address.clone(), db_nonce) { + Ok(evicted) => evicted, + Err(_) => { + // The cache is corrupt, reset it + self.reset_cache(mempool_db); + None + } + }; + if evicted.is_some() { + // If we evicted something, we need to flush the cache. + self.flush_with_evicted(mempool_db, evicted); + } + return db_nonce; + } + + // Check the chainstate + let nonce = StacksChainState::get_nonce(clarity_tx, &address.clone().into()); + + self.set(address.clone(), nonce, mempool_db); + nonce + }
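+
+    // Illustrative read-through behavior of `get` (comment only, not from the
+    // source). Assume `addr` is not in RAM, the `nonces` table is empty, and
+    // the MARF reports an account nonce of 0:
+    //
+    //     let mut cache = NonceCache::new(1024);
+    //     let n1 = cache.get(&addr, clarity_tx, &mut mempool_db); // MARF read => 0,
+    //                                                             // then cached
+    //     let n2 = cache.get(&addr, clarity_tx, &mut mempool_db); // RAM hit  => 0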
+ + /// Set the nonce for `address` to `value` in the in-memory cache. + /// If this causes an eviction, flush the in-memory cache to the DB. + pub fn set(&mut self, address: StacksAddress, value: u64, conn: &mut DBConn) { + let evicted = match self.cache.insert(address.clone(), value) { + Ok(evicted) => evicted, + Err(_) => { + // The cache is corrupt, reset it + self.reset_cache(conn); + Some((address, value)) + } + }; + if evicted.is_some() { + // If we evicted something, we need to flush the cache. + self.flush_with_evicted(conn, evicted); + } + } + + /// Flush the in-memory cache to the DB, including `evicted`. + /// Do not return until successful. + pub fn flush_with_evicted(&mut self, conn: &mut DBConn, evicted: Option<(StacksAddress, u64)>) { + const MAX_BACKOFF: Duration = Duration::from_secs(30); + let mut backoff = Duration::from_millis(rand::thread_rng().gen_range(50..200)); + + loop { + let result = self.try_flush_with_evicted(conn, evicted); + + match result { + Ok(_) => return, // Success: exit the loop + Err(e) => { + // Calculate a backoff duration + warn!("Nonce cache flush failed: {e}. Retrying in {backoff:?}"); + + // Sleep for the backoff duration + thread::sleep(backoff); + + if backoff < MAX_BACKOFF { + // Exponential backoff + backoff = backoff * 2 + + Duration::from_millis(rand::thread_rng().gen_range(50..200)); + } + } + } + } + } + + /// Try to flush the in-memory cache to the DB, including `evicted`. + pub fn try_flush_with_evicted( + &mut self, + conn: &mut DBConn, + evicted: Option<(StacksAddress, u64)>, + ) -> Result<(), db_error> { + // Flush the cache to the database + let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; + + let tx = conn.transaction()?; + + if let Some((addr, nonce)) = evicted { + tx.execute(sql, params![addr, nonce])?; + } + + match self.cache.flush(|addr, nonce| { + tx.execute(sql, params![addr, nonce])?; + Ok::<(), db_error>(()) + }) { + Ok(_) => {} + Err(FlushError::LruCacheCorrupted) => { + drop(tx); + // The cache is corrupt, reset it and return + self.reset_cache(conn); + return Ok(()); + } + Err(FlushError::FlushError(e)) => return Err(e), + }; + + tx.commit()?; + + Ok(()) + }
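+
+    // Illustrative backoff schedule for the retry loop above (comment only):
+    // starting from a random 50-200ms delay, each failed flush roughly doubles
+    // the wait and adds fresh jitter (e.g. ~120ms -> ~300ms -> ~650ms -> ...),
+    // and the delay stops growing once it reaches MAX_BACKOFF (30 seconds).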
+ + /// Flush the in-memory cache to the DB. + /// Do not return until successful. + pub fn flush(&mut self, conn: &mut DBConn) { + self.flush_with_evicted(conn, None) + } +} + +fn db_set_nonce(conn: &DBConn, address: &StacksAddress, nonce: u64) -> Result<(), db_error> { + let addr_str = address.to_string(); + let nonce_i64 = u64_to_sql(nonce)?; + + let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; + conn.execute(sql, params![addr_str, nonce_i64])?; + Ok(()) +} + +fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result<Option<u64>, db_error> { + let addr_str = address.to_string(); + + let sql = "SELECT nonce FROM nonces WHERE address = ?"; + query_row(conn, sql, params![addr_str]) +} + +#[cfg(test)] +mod tests { + use clarity::consts::CHAIN_ID_TESTNET; + use clarity::types::chainstate::StacksBlockId; + use clarity::types::Address; + use clarity::vm::tests::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; + + use super::*; + use crate::chainstate::stacks::db::test::{chainstate_path, instantiate_chainstate}; + use crate::chainstate::stacks::index::ClarityMarfTrieId; + use crate::clarity_vm::clarity::ClarityInstance; + use crate::clarity_vm::database::marf::MarfedKV; + + #[test] + fn test_nonce_cache() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let mut cache = NonceCache::new(2); + + let addr1 = + StacksAddress::from_string("ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM").unwrap(); + let addr2 = + StacksAddress::from_string("ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5").unwrap(); + let addr3 = + StacksAddress::from_string("ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG").unwrap(); + + let conn = &mut mempool.db; + cache.set(addr1.clone(), 1, conn); + cache.set(addr2.clone(), 2, conn); + + let marf = MarfedKV::temporary(); + let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); + clarity_instance + .begin_test_genesis_block( + &StacksBlockId::sentinel(), + &StacksBlockId([0u8; 32]), + &TEST_HEADER_DB, + &TEST_BURN_STATE_DB, + ) + .commit_block(); + let mut clarity_conn = clarity_instance.begin_block( + &StacksBlockId([0u8; 32]), + &StacksBlockId([1u8; 32]), + &TEST_HEADER_DB, + &TEST_BURN_STATE_DB, + ); + + clarity_conn.as_transaction(|clarity_tx| { + assert_eq!(cache.get(&addr1, clarity_tx, conn), 1); + assert_eq!(cache.get(&addr2, clarity_tx, conn), 2); + // addr3 is not in the cache, so it should be fetched from the + // clarity instance (and get 0) + assert_eq!(cache.get(&addr3, clarity_tx, conn), 0); + }); + } + + #[test] + fn test_db_set_nonce() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let conn = &mut mempool.db; + let addr = StacksAddress::from_string("ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC").unwrap(); + db_set_nonce(&conn, &addr, 123).unwrap(); + assert_eq!(db_get_nonce(&conn, &addr).unwrap().unwrap(), 123); + } + + #[test] + fn test_nonce_cache_eviction() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let mut cache = NonceCache::new(2); // Cache size of 2 + + let addr1 = + StacksAddress::from_string("ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM").unwrap(); +
let addr2 = + StacksAddress::from_string("ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5").unwrap(); + let addr3 = + StacksAddress::from_string("ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG").unwrap(); + + let conn = &mut mempool.db; + + // Fill cache to capacity + cache.set(addr1.clone(), 1, conn); + cache.set(addr2.clone(), 2, conn); + + // This should cause addr1 to be evicted + cache.set(addr3.clone(), 3, conn); + + // Verify addr1 was written to DB during eviction + assert_eq!(db_get_nonce(&conn, &addr1).unwrap().unwrap(), 1); + } + + #[test] + fn test_nonce_cache_flush() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let mut cache = NonceCache::new(3); + + let addr1 = + StacksAddress::from_string("ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM").unwrap(); + let addr2 = + StacksAddress::from_string("ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5").unwrap(); + + let conn = &mut mempool.db; + + cache.set(addr1.clone(), 5, conn); + cache.set(addr2.clone(), 10, conn); + + // Explicitly flush cache + cache.flush(conn); + + // Verify both entries were written to DB + assert_eq!(db_get_nonce(&conn, &addr1).unwrap().unwrap(), 5); + assert_eq!(db_get_nonce(&conn, &addr2).unwrap().unwrap(), 10); + } + + #[test] + fn test_db_nonce_overwrite() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let conn = &mut mempool.db; + + let addr = StacksAddress::from_string("ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC").unwrap(); + + // Set initial nonce + db_set_nonce(&conn, &addr, 1).unwrap(); + assert_eq!(db_get_nonce(&conn, &addr).unwrap().unwrap(), 1); + + // Overwrite with new nonce + db_set_nonce(&conn, &addr, 2).unwrap(); + assert_eq!(db_get_nonce(&conn, &addr).unwrap().unwrap(), 2); + } +} diff --git a/stackslib/src/core/test_util.rs b/stackslib/src/core/test_util.rs new file mode 100644 index 00000000000..519d6600135 --- /dev/null +++ b/stackslib/src/core/test_util.rs @@ -0,0 +1,514 @@ +use std::io::Cursor; + +use chrono::Utc; +use clarity::codec::StacksMessageCodec; +use clarity::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, +}; +use clarity::vm::tests::BurnStateDB; +use clarity::vm::types::PrincipalData; +use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; +use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; +use crate::chainstate::stacks::{ + CoinbasePayload, StacksBlock, StacksMicroblock, StacksMicroblockHeader, StacksTransaction, + StacksTransactionSigner, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionContractCall, TransactionPayload, TransactionPostConditionMode, + TransactionSmartContract, TransactionSpendingCondition, TransactionVersion, +}; +use crate::util_lib::strings::StacksString; + +#[allow(clippy::too_many_arguments)] +pub fn sign_sponsored_sig_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + payer: &StacksPrivateKey, + sender_nonce: u64, + payer_nonce: u64, + tx_fee: u64, + chain_id: u32, + anchor_mode: 
TransactionAnchorMode, + version: TransactionVersion, +) -> StacksTransaction { + sign_tx_anchor_mode_version( + payload, + sender, + Some(payer), + sender_nonce, + Some(payer_nonce), + tx_fee, + chain_id, + anchor_mode, + version, + ) +} + +pub fn sign_standard_single_sig_tx( + payload: TransactionPayload, + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, +) -> StacksTransaction { + sign_standard_single_sig_tx_anchor_mode( + payload, + sender, + nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OnChainOnly, + ) +} + +pub fn sign_standard_single_sig_tx_anchor_mode( + payload: TransactionPayload, + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, +) -> StacksTransaction { + sign_standard_single_sig_tx_anchor_mode_version( + payload, + sender, + nonce, + tx_fee, + chain_id, + anchor_mode, + TransactionVersion::Testnet, + ) +} + +pub fn sign_standard_single_sig_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> StacksTransaction { + sign_tx_anchor_mode_version( + payload, + sender, + None, + nonce, + None, + tx_fee, + chain_id, + anchor_mode, + version, + ) +} + +#[allow(clippy::too_many_arguments)] +pub fn sign_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + payer: Option<&StacksPrivateKey>, + sender_nonce: u64, + payer_nonce: Option<u64>, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> StacksTransaction { + let mut sender_spending_condition = + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender)) + .expect("Failed to create p2pkh spending condition from public key."); + sender_spending_condition.set_nonce(sender_nonce); + + let auth = match (payer, payer_nonce) { + (Some(payer), Some(payer_nonce)) => { + let mut payer_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh( + StacksPublicKey::from_private(payer), + ) + .expect("Failed to create p2pkh spending condition from public key."); + payer_spending_condition.set_nonce(payer_nonce); + payer_spending_condition.set_tx_fee(tx_fee); + TransactionAuth::Sponsored(sender_spending_condition, payer_spending_condition) + } + _ => { + sender_spending_condition.set_tx_fee(tx_fee); + TransactionAuth::Standard(sender_spending_condition) + } + }; + let mut unsigned_tx = StacksTransaction::new(version, auth, payload); + unsigned_tx.anchor_mode = anchor_mode; + unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; + unsigned_tx.chain_id = chain_id; + + let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); + tx_signer.sign_origin(sender).unwrap(); + if let (Some(payer), Some(_)) = (payer, payer_nonce) { + tx_signer.sign_sponsor(payer).unwrap(); + } + + tx_signer.get_tx().unwrap() +} + +#[allow(clippy::too_many_arguments)] +pub fn serialize_sign_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + payer: Option<&StacksPrivateKey>, + sender_nonce: u64, + payer_nonce: Option<u64>, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> Vec<u8> { + let tx = sign_tx_anchor_mode_version( + payload, + sender, + payer, + sender_nonce, + payer_nonce, + tx_fee, + chain_id, + anchor_mode, + version, + ); + + let mut buf = vec![]; + tx.consensus_serialize(&mut buf).unwrap(); + buf +}
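+
+// Usage sketch (illustrative only; the key, fee, and nonce values are
+// hypothetical): building a signed, serialized standard testnet transfer via
+// the helpers above:
+//
+//     let bytes = serialize_sign_tx_anchor_mode_version(
+//         TransactionPayload::TokenTransfer(recipient.clone(), 100, TokenTransferMemo([0; 34])),
+//         &sender_sk, None, 0, None, 300, 0x80000000,
+//         TransactionAnchorMode::OnChainOnly, TransactionVersion::Testnet,
+//     );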
+ +pub fn make_contract_publish_versioned( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_name: &str, + contract_content: &str, + version: Option<ClarityVersion>, +) -> Vec<u8> { + let name = ContractName::from(contract_name); + let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); + + let payload = + TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); + + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_contract_publish( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_name: &str, + contract_content: &str, +) -> Vec<u8> { + make_contract_publish_versioned( + sender, + nonce, + tx_fee, + chain_id, + contract_name, + contract_content, + None, + ) +} + +pub fn make_contract_publish_microblock_only_versioned( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_name: &str, + contract_content: &str, + version: Option<ClarityVersion>, +) -> Vec<u8> { + let name = ContractName::from(contract_name); + let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); + + let payload = + TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); + + let tx = sign_standard_single_sig_tx_anchor_mode( + payload, + sender, + nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OffChainOnly, + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_contract_publish_microblock_only( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_name: &str, + contract_content: &str, +) -> Vec<u8> { + make_contract_publish_microblock_only_versioned( + sender, + nonce, + tx_fee, + chain_id, + contract_name, + contract_content, + None, + ) +} + +pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sk)) +} + +pub fn make_stacks_transfer( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + recipient: &PrincipalData, + amount: u64, +) -> Vec<u8> { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +#[allow(clippy::too_many_arguments)] +pub fn make_sponsored_stacks_transfer_on_testnet( + sender: &StacksPrivateKey, + payer: &StacksPrivateKey, + sender_nonce: u64, + payer_nonce: u64, + tx_fee: u64, + chain_id: u32, + recipient: &PrincipalData, + amount: u64, +) -> Vec<u8> { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + let tx = sign_sponsored_sig_tx_anchor_mode_version( + payload, + sender, + payer, + sender_nonce, + payer_nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OnChainOnly, + TransactionVersion::Testnet, + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_stacks_transfer_mblock_only( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + recipient: &PrincipalData, + amount: u64, +) -> Vec<u8> { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + let tx = sign_standard_single_sig_tx_anchor_mode( + payload,
+ sender, + nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OffChainOnly, + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_poison( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + header_1: StacksMicroblockHeader, + header_2: StacksMicroblockHeader, +) -> Vec<u8> { + let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec<u8> { + let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +#[allow(clippy::too_many_arguments)] +pub fn make_contract_call( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_addr: &StacksAddress, + contract_name: &str, + function_name: &str, + function_args: &[Value], +) -> Vec<u8> { + let contract_name = ContractName::from(contract_name); + let function_name = ClarityName::from(function_name); + + let payload = TransactionContractCall { + address: *contract_addr, + contract_name, + function_name, + function_args: function_args.to_vec(), + }; + + let tx = sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +#[allow(clippy::too_many_arguments)] +pub fn make_contract_call_mblock_only( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_addr: &StacksAddress, + contract_name: &str, + function_name: &str, + function_args: &[Value], +) -> Vec<u8> { + let contract_name = ContractName::from(contract_name); + let function_name = ClarityName::from(function_name); + + let payload = TransactionContractCall { + address: *contract_addr, + contract_name, + function_name, + function_args: function_args.to_vec(), + }; + + let tx = sign_standard_single_sig_tx_anchor_mode( + payload.into(), + sender, + nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OffChainOnly, + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +}
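+
+// Usage sketch (illustrative only; the address, names, and argument are
+// hypothetical):
+//
+//     let bytes = make_contract_call(
+//         &sender_sk, 0, 300, 0x80000000,
+//         &contract_addr, "counter", "increment", &[Value::UInt(1)],
+//     );
+//
+// The result is the consensus-serialized transaction, ready to be submitted or
+// inserted into a test mempool with `insert_tx_in_mempool` below.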
+ +pub fn make_microblock( + privk: &StacksPrivateKey, + chainstate: &mut StacksChainState, + burn_dbconn: &dyn BurnStateDB, + consensus_hash: ConsensusHash, + block: StacksBlock, + txs: Vec<StacksTransaction>, +) -> StacksMicroblock { + let mut block_bytes = vec![]; + block.consensus_serialize(&mut block_bytes).unwrap(); + + let mut microblock_builder = StacksMicroblockBuilder::new( + block.block_hash(), + consensus_hash, + chainstate, + burn_dbconn, + BlockBuilderSettings::max_value(), + ) + .unwrap(); + let mempool_txs: Vec<_> = txs + .into_iter() + .map(|tx| { + // TODO: better fee estimation + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + (tx, tx_bytes.len() as u64) + }) + .collect(); + + // NOTE: we intentionally do not check the block's microblock pubkey hash against the private + // key, because we may need to test that microblocks get rejected due to bad signatures. + microblock_builder + .mine_next_microblock_from_txs(mempool_txs, privk) + .unwrap() +} + +pub fn insert_tx_in_mempool( + db_tx: &rusqlite::Transaction, + tx_hex: Vec<u8>, + origin_addr: &StacksAddress, + origin_nonce: u64, + fee: u64, + consensus_hash: &ConsensusHash, + block_header_hash: &BlockHeaderHash, + height: u64, +) { + let sql = "INSERT OR REPLACE INTO mempool ( txid, origin_address, origin_nonce, sponsor_address, sponsor_nonce, tx_fee, length, consensus_hash, block_header_hash, height, accept_time, tx, fee_rate) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)"; + + let origin_addr_str = origin_addr.to_string(); + let length = tx_hex.len() as u64; + let fee_rate = fee / length * 30; + + let txid = { + let mut cursor = Cursor::new(&tx_hex); + StacksTransaction::consensus_deserialize(&mut cursor) + .expect("Failed to deserialize transaction") + .txid() + }; + let args = rusqlite::params![ + txid, + origin_addr_str, + origin_nonce, + origin_addr_str, + origin_nonce, + fee, + length, + consensus_hash, + block_header_hash, + height, + Utc::now().timestamp(), + tx_hex, + fee_rate + ]; + db_tx + .execute(sql, args) + .expect("Failed to insert transaction into mempool"); +} diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index ec5fcf0ec7c..00010874ca2 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -39,6 +39,7 @@ use stacks_common::util::secp256k1::{MessageSignature, *}; use stacks_common::util::vrf::VRFProof; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log, sleep_ms}; +use super::mempool::MemPoolWalkStrategy; use super::MemPoolDB; use crate::burnchains::{Address, Txid}; use crate::chainstate::burn::ConsensusHash; @@ -64,6 +65,7 @@ use crate::core::mempool::{ db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, MemPoolWalkTxTypes, TxTag, BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS, }; +use crate::core::test_util::{insert_tx_in_mempool, make_stacks_transfer, to_addr}; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::net::Error as NetError; use crate::util_lib::bloom::test::setup_bloom_counter; @@ -278,7 +280,6 @@ fn mempool_walk_over_fork() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, -
available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -416,7 +414,7 @@ fn mempool_walk_over_fork() { ); mempool - .reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); chainstate.with_read_only_clarity_tx( @@ -435,7 +433,6 @@ fn mempool_walk_over_fork() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -456,7 +453,7 @@ fn mempool_walk_over_fork() { ); mempool - .reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); // let's test replace-across-fork while we're here. @@ -661,7 +658,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -679,7 +675,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { ); // Next with 0% - let _ = mempool.reset_nonce_cache(); + let _ = mempool.reset_mempool_caches(); mempool_settings.consider_no_estimate_tx_prob = 0; chainstate.with_read_only_clarity_tx( @@ -698,7 +694,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -716,7 +711,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { ); // Then with with 100% - let _ = mempool.reset_nonce_cache(); + let _ = mempool.reset_mempool_caches(); mempool_settings.consider_no_estimate_tx_prob = 100; chainstate.with_read_only_clarity_tx( @@ -735,7 +730,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -844,7 +838,6 @@ fn test_iterate_candidates_skipped_transaction() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -959,7 +952,6 @@ fn test_iterate_candidates_processing_error_transaction() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -1074,7 +1066,6 @@ fn test_iterate_candidates_problematic_transaction() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -1226,7 +1217,6 @@ fn test_iterate_candidates_concurrent_write_lock() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -2725,7 +2715,6 @@ fn test_filter_txs_by_type() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -2760,7 +2749,6 @@ fn test_filter_txs_by_type() { // Generate any success result 
TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -2777,3 +2765,87 @@ }, ); } + +#[test] +fn large_mempool() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let mut senders = (0..1024) + .map(|_| (StacksPrivateKey::random(), 0)) + .collect::<Vec<_>>(); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let b = make_block( + &mut chainstate, + ConsensusHash([0x2; 20]), + &( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ), + 2, + 2, + ); + let block_height = 10; + + println!("Adding transactions to mempool"); + let mempool_tx = mempool.tx_begin().unwrap(); + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = to_addr(sender_sk); + let fee = thread_rng().gen_range(180..2000); + let transfer_tx = + make_stacks_transfer(sender_sk, *nonce, fee, 0x80000000, &recipient, 1); + insert_tx_in_mempool( + &mempool_tx, + transfer_tx, + &sender_addr, + *nonce, + fee, + &ConsensusHash([0x2; 20]), + &FIRST_STACKS_BLOCK_HASH, + block_height, + ); + *nonce += 1; + } + } + mempool_tx.commit().unwrap(); + + let mut mempool_settings = MemPoolWalkSettings::default(); + mempool_settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + let mut tx_events = Vec::new(); + + println!("Iterating mempool"); + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b.0, &b.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + &mut tx_events, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(Some( + // Generate any success result + TransactionResult::success( + &available_tx.tx.tx, + StacksTransactionReceipt::from_stx_transfer( + available_tx.tx.tx.clone(), + vec![], + Value::okay(Value::Bool(true)).unwrap(), + ExecutionCost::ZERO, + ), + ) + .convert_to_event(), + )) + }, + ) + .unwrap(); + // It should be able to iterate through at least 10000 transactions in 5s + assert!(count_txs > 10000); + }, + ); +} diff --git a/stackslib/src/cost_estimates/tests/cost_estimators.rs b/stackslib/src/cost_estimates/tests/cost_estimators.rs index 927c0a50d8a..e3b2515e018 100644 --- a/stackslib/src/cost_estimates/tests/cost_estimators.rs +++ b/stackslib/src/cost_estimates/tests/cost_estimators.rs @@ -1,5 +1,6 @@ use std::env; use std::path::PathBuf; +use std::time::Instant; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, StandardPrincipalData}; @@ -11,7 +12,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::vrf::VRFProof; -use time::Instant; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::db::{StacksEpochReceipt, StacksHeaderInfo}; diff --git a/stackslib/src/cost_estimates/tests/fee_scalar.rs b/stackslib/src/cost_estimates/tests/fee_scalar.rs index 04c1fc27a74..c7f39c29211 100644 --- a/stackslib/src/cost_estimates/tests/fee_scalar.rs +++ b/stackslib/src/cost_estimates/tests/fee_scalar.rs @@ -1,5 +1,6 @@ use std::env; use std::path::PathBuf; +use std::time::Instant; use clarity::vm::costs::ExecutionCost; use 
clarity::vm::types::{PrincipalData, StandardPrincipalData}; @@ -11,7 +12,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::vrf::VRFProof; -use time::Instant; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::db::{StacksEpochReceipt, StacksHeaderInfo}; diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 80f1f8c07cb..6dd0a316cd6 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -22,9 +22,6 @@ #[macro_use] extern crate stacks_common; -#[macro_use(slog_debug, slog_info, slog_warn)] -extern crate slog; - #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -37,7 +34,7 @@ use std::fs::File; use std::io::prelude::*; use std::io::BufReader; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; -use std::time::Duration; +use std::time::{Duration, Instant}; use std::{env, fs, io, process, thread}; use blockstack_lib::burnchains::bitcoin::{spv, BitcoinNetworkType}; @@ -605,7 +602,7 @@ Given a , obtain a 2100 header hash block inventory (with an empty let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) .expect("Failed to get sortition chain tip"); - let start = time::Instant::now(); + let start = Instant::now(); let header_hashes = { let ic = sort_db.index_conn(); @@ -614,14 +611,11 @@ Given a , obtain a 2100 header hash block inventory (with an empty .unwrap() }; - println!( - "Fetched header hashes in {}", - start.elapsed().as_seconds_f32() - ); - let start = time::Instant::now(); + println!("Fetched header hashes in {}", start.elapsed().as_secs_f32()); + let start = Instant::now(); let block_inv = chain_state.get_blocks_inventory(&header_hashes).unwrap(); - println!("Fetched block inv in {}", start.elapsed().as_seconds_f32()); + println!("Fetched block inv in {}", start.elapsed().as_secs_f32()); println!("{:?}", &block_inv); println!("Done!"); @@ -652,7 +646,7 @@ check if the associated microblocks can be downloaded let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) .expect("Failed to get sortition chain tip"); - let start = time::Instant::now(); + let start = Instant::now(); let local_peer = LocalPeer::new( 0, 0, @@ -671,12 +665,9 @@ check if the associated microblocks can be downloaded .unwrap() }; - println!( - "Fetched header hashes in {}", - start.elapsed().as_seconds_f32() - ); + println!("Fetched header hashes in {}", start.elapsed().as_secs_f32()); - let start = time::Instant::now(); + let start = Instant::now(); let mut total_load_headers = 0; for (consensus_hash, block_hash_opt) in header_hashes.iter() { @@ -736,7 +727,7 @@ check if the associated microblocks can be downloaded println!( "Checked can_download in {} (headers load took {}ms)", - start.elapsed().as_seconds_f32(), + start.elapsed().as_secs_f32(), total_load_headers ); diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index cff12f2242a..24937bbd0a9 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -20,7 +20,7 @@ use stacks_common::codec::read_next; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, }; -use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::HexError; use crate::burnchains::Txid; @@ -244,3 +244,4 @@ impl_hex_deser!(VRFSeed); impl_hex_deser!(ConsensusHash); 
impl_hex_deser!(BlockHeaderHash); impl_hex_deser!(Hash160); +impl_hex_deser!(Sha512Trunc256Sum); diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 7047eba6109..0ba13f2f8fb 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -549,6 +549,7 @@ impl NakamotoBlockProposal { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ); let err = match tx_result { TransactionResult::Success(_) => Ok(()), @@ -753,7 +754,7 @@ impl RPCRequestHandler for RPCBlockProposalRequestHandler { info!( "Received block proposal request"; - "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "signer_signature_hash" => %block_proposal.block.header.signer_signature_hash(), "block_header_hash" => %block_proposal.block.header.block_hash(), "height" => block_proposal.block.header.chain_length, "tx_count" => block_proposal.block.txs.len(), diff --git a/stackslib/src/net/api/tests/get_tenures_fork_info.rs b/stackslib/src/net/api/tests/get_tenures_fork_info.rs index 2b5abcfb362..360f75f0fc6 100644 --- a/stackslib/src/net/api/tests/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/tests/get_tenures_fork_info.rs @@ -37,6 +37,7 @@ fn make_preamble(start: &T, stop: &R) -> HttpRequestPrea content_length: Some(0), keep_alive: false, headers: BTreeMap::new(), + set_cookie: Vec::new(), } } diff --git a/stackslib/src/net/api/tests/getsigner.rs b/stackslib/src/net/api/tests/getsigner.rs index 381706c50e7..612a478517c 100644 --- a/stackslib/src/net/api/tests/getsigner.rs +++ b/stackslib/src/net/api/tests/getsigner.rs @@ -41,6 +41,7 @@ fn make_preamble(query: &str) -> HttpRequestPreamble { content_length: Some(0), keep_alive: false, headers: BTreeMap::new(), + set_cookie: Vec::new(), } } diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index 5a8e9ae034b..a961f43f581 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -40,6 +40,7 @@ fn make_preamble(query: &str) -> HttpRequestPreamble { content_length: Some(0), keep_alive: false, headers: BTreeMap::new(), + set_cookie: Vec::new(), } } diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 9347d8384bd..f561567d3cb 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -309,6 +309,7 @@ fn test_try_make_response() { tx.tx_len(), &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ); let block = builder.mine_nakamoto_block(&mut tenure_tx); Ok(block) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 14e6c20eebe..a2a5a9b2d95 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -149,6 +149,8 @@ pub struct NakamotoTenureDownloader { pub tenure_end_block: Option<NakamotoBlock>, /// Tenure blocks pub tenure_blocks: Option<Vec<NakamotoBlock>>, + /// Whether this tenure is unconfirmed + pub is_tenure_unconfirmed: bool, } impl NakamotoTenureDownloader { @@ -161,6 +163,7 @@ impl NakamotoTenureDownloader { naddr: NeighborAddress, start_signer_keys: RewardSet, end_signer_keys: RewardSet, + is_tenure_unconfirmed: bool, ) -> Self { debug!( "Instantiate downloader to {}-{} for tenure {}: {}-{}", @@ -187,6 +190,7 @@
tenure_start_block: None, tenure_end_block: None, tenure_blocks: None, + is_tenure_unconfirmed, } } diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 5a1990961b8..8b62133641a 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -520,6 +520,7 @@ impl NakamotoTenureDownloaderSet { naddr.clone(), start_reward_set.clone(), end_reward_set.clone(), + false, ); debug!("Request tenure {ch} from neighbor {naddr}"); @@ -671,14 +672,18 @@ impl NakamotoTenureDownloaderSet { ); new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); if downloader.is_done() { - info!( - "Downloader for tenure {} is finished", - &downloader.tenure_id_consensus_hash - ); - debug!( - "Downloader for tenure {} finished on {naddr}", - &downloader.tenure_id_consensus_hash, - ); + if downloader.is_tenure_unconfirmed { + debug!( + "Downloader for tenure {} finished on {naddr}", + &downloader.tenure_id_consensus_hash, + ); + } else { + info!( + "Downloader for tenure {} is finished", + &downloader.tenure_id_consensus_hash + ); + } + finished.push(naddr.clone()); finished_tenures.push(CompletedTenure::from(downloader)); continue; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 2a330edb78a..579ee3e4949 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -742,6 +742,7 @@ impl NakamotoUnconfirmedTenureDownloader { self.naddr.clone(), confirmed_signer_keys.clone(), unconfirmed_signer_keys.clone(), + true, ); Ok(ntd) diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index 8ccb2141462..aa2c3194192 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -14,7 +14,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>.
+use std::collections::btree_map::Entry; use std::collections::{BTreeMap, HashMap, HashSet}; +use std::fmt::Display; use std::io::{Read, Write}; use percent_encoding::percent_decode_str; @@ -54,6 +56,8 @@ pub struct HttpRequestPreamble { pub keep_alive: bool, /// Other headers that were not consumed in parsing pub headers: BTreeMap<String, String>, + /// `Set-Cookie` headers + pub set_cookie: Vec<String>, } impl HttpRequestPreamble { @@ -74,6 +78,7 @@ impl HttpRequestPreamble { content_length: None, keep_alive, headers: BTreeMap::new(), + set_cookie: vec![], } } @@ -105,6 +110,7 @@ impl HttpRequestPreamble { content_length: None, keep_alive: true, headers: BTreeMap::new(), + set_cookie: vec![], } } @@ -187,10 +193,10 @@ impl HttpRequestPreamble { return Some(format!("{}", &self.host)); } "content-type" => { - return self.content_type.clone().map(|ct| format!("{}", &ct)); + return self.content_type.as_ref().map(HttpContentType::to_string); } "content-length" => { - return self.content_length.clone().map(|cl| format!("{}", &cl)); + return self.content_length.as_ref().map(u32::to_string); } _ => { return self.headers.get(&hdr).cloned(); @@ -371,9 +377,10 @@ impl StacksMessageCodec for HttpRequestPreamble { let mut headers: BTreeMap<String, String> = BTreeMap::new(); let mut seen_headers: HashSet<String> = HashSet::new(); + let mut set_cookie = vec![]; - for i in 0..req.headers.len() { - let value = String::from_utf8(req.headers[i].value.to_vec()).map_err(|_e| { + for req_header in req.headers.iter() { + let value = String::from_utf8(req_header.value.to_vec()).map_err(|_e| { CodecError::DeserializeError( "Invalid HTTP header value: not utf-8".to_string(), ) @@ -389,7 +396,7 @@ impl StacksMessageCodec for HttpRequestPreamble { )); } - let key = req.headers[i].name.to_string().to_lowercase(); + let key = req_header.name.to_lowercase(); if seen_headers.contains(&key) { return Err(CodecError::DeserializeError(format!( @@ -397,23 +404,25 @@ impl StacksMessageCodec for HttpRequestPreamble { key ))); } - seen_headers.insert(key.clone()); if key == "host" { peerhost = match value.parse::<PeerHost>() { Ok(ph) => Some(ph), Err(_) => None, }; + seen_headers.insert(key); } else if key == "content-type" { // parse let ctype = value.to_lowercase().parse::<HttpContentType>()?; content_type = Some(ctype); + seen_headers.insert(key); } else if key == "content-length" { // parse content_length = match value.parse::<u32>() { Ok(len) => Some(len), Err(_) => None, }; + seen_headers.insert(key); } else if key == "connection" { // parse if value.to_lowercase() == "close" { @@ -425,8 +434,17 @@ "Inavlid HTTP request: invalid Connection: header".to_string(), )); } + seen_headers.insert(key); + } else if key == "set-cookie" { + set_cookie.push(value); } else { - headers.insert(key, value); + headers + .entry(key) + .and_modify(|entry| { + entry.push_str(", "); + entry.push_str(&value); + }) + .or_insert(value); } } @@ -445,6 +463,7 @@ content_length, keep_alive, headers, + set_cookie, }) } }
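+
+// Illustrative behavior of the header handling above (comment only, not from
+// the patch):
+//   "Cache-Control: no-cache" followed by "cache-control: no-store"
+//       => headers["cache-control"] == "no-cache, no-store"
+//   "Set-Cookie: a1" followed by "Set-Cookie: a2"
+//       => set_cookie == ["a1", "a2"] (order of appearance preserved)
+// Reserved headers (host, content-type, content-length, connection) still
+// reject duplicates via `seen_headers`.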
diff --git a/stackslib/src/net/http/tests.rs b/stackslib/src/net/http/tests.rs index 55747e18fa9..3952187a4f9 100644 --- a/stackslib/src/net/http/tests.rs +++ b/stackslib/src/net/http/tests.rs @@ -14,7 +14,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. -use stacks_common::codec::StacksMessageCodec; +use std::collections::BTreeMap; + +use stacks_common::codec::{Error as CodecError, StacksMessageCodec}; use stacks_common::types::net::{PeerAddress, PeerHost}; use crate::net::http::common::{HTTP_PREAMBLE_MAX_ENCODED_SIZE, HTTP_PREAMBLE_MAX_NUM_HEADERS}; @@ -78,6 +80,58 @@ fn test_parse_reserved_header() { } } +#[test] +fn parse_http_request_duplicate_headers() { + let tests = vec![ + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nCache-Control: no-cache\r\ncache-control: no-store\r\nConnection: close\r\n\r\n", + Ok(BTreeMap::from([("cache-control".to_string(), "no-cache, no-store".to_string())]))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nCache-Control: no-store\r\ncache-control: no-cache\r\nConnection: close\r\n\r\n", + Ok(BTreeMap::from([("cache-control".into(), "no-store, no-cache".into())]))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nHost: core2.blockstack.org\r\nConnection: close\r\n\r\n", + Err(CodecError::DeserializeError("Invalid HTTP request: duplicate header \"host\"".into()))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nConnection: close\r\nConnection: keep-alive\r\n\r\n", + Err(CodecError::DeserializeError("Invalid HTTP request: duplicate header \"connection\"".into()))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nContent-Type: application/json\r\nContent-Type: application/json\r\nConnection: close\r\n\r\n", + Err(CodecError::DeserializeError("Invalid HTTP request: duplicate header \"content-type\"".into()))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nContent-Length: 10\r\nContent-length: 5\r\nConnection: close\r\n\r\n", + Err(CodecError::DeserializeError("Invalid HTTP request: duplicate header \"content-length\"".into()))), + ]; + + for (data, expected) in tests.into_iter() { + let result = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); + match result { + Ok(req) => { + let expected = expected.unwrap(); + assert_eq!(req.headers, expected); + } + Err(e) => { + let expected = expected.unwrap_err(); + assert_eq!(format!("{expected:?}"), format!("{e:?}")); + } + } + } +} + +#[test] +fn parse_http_request_set_cookie() { + let tests = vec![ + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nConnection: close\r\n\r\n", + vec![]), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nset-Cookie: a1\r\nSet-Cookie: a2\r\nConnection: close\r\n\r\n", + vec!["a1".to_string(), "a2".to_string()]), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nset-Cookie: a2\r\nSet-Cookie: a1\r\nConnection: close\r\n\r\n", + vec!["a2".to_string(), "a1".to_string()]), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nset-Cookie: a1\r\nConnection: close\r\n\r\n", + vec!["a1".to_string()]), + ]; + + for (data, expected) in tests.into_iter() { + let req = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()) + .expect("Should be able to parse the set-cookie requests"); + assert_eq!(req.set_cookie, expected); + } +} + #[test] fn test_parse_http_request_preamble_ok() { let tests = vec![ diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 616ea8f81fb..ea8741f63c1 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3097,6 +3097,7 @@ pub mod test { &boot_code_smart_contract, &boot_code_account, ASTRules::PrecheckSize, + None, ) .unwrap() }); @@ -4929,14 +4930,4 @@ pub mod test { acct } } - - pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { - StacksAddress::from_public_keys( -
C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(sk)], - ) - .unwrap() - } } diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index cc53f22a4f1..9bd62705663 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -43,10 +43,11 @@ use crate::chainstate::stacks::{ TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; +use crate::core::test_util::to_addr; use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::download::nakamoto::{TenureStartEnd, WantedTenure, *}; use crate::net::inv::nakamoto::NakamotoTenureInv; -use crate::net::test::{dns_thread_start, to_addr, TestEventObserver}; +use crate::net::test::{dns_thread_start, TestEventObserver}; use crate::net::tests::inv::nakamoto::{ make_nakamoto_peer_from_invs, make_nakamoto_peers_from_invs_ext, peer_get_nakamoto_invs, }; @@ -292,6 +293,7 @@ fn test_nakamoto_tenure_downloader() { naddr, reward_set.clone(), reward_set, + false, ); // must be first block diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index 625cb7cd017..ce94e4865aa 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -43,10 +43,11 @@ use crate::chainstate::stacks::{ TransactionAuth, TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; +use crate::core::test_util::to_addr; use crate::core::StacksEpochExtension; use crate::net::inv::nakamoto::{InvGenerator, NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::comms::NeighborComms; -use crate::net::test::{to_addr, TestEventObserver, TestPeer}; +use crate::net::test::{TestEventObserver, TestPeer}; use crate::net::tests::{NakamotoBootPlan, NakamotoBootStep, NakamotoBootTenure}; use crate::net::{ Error as NetError, GetNakamotoInvData, HandshakeData, NakamotoInvData, NeighborAddress, diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 9576ae7e546..f6b38f42de5 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -32,6 +32,7 @@ use crate::burnchains::*; use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; use crate::chainstate::stacks::test::*; use crate::chainstate::stacks::*; +use crate::core::test_util::to_addr; use crate::core::StacksEpochExtension; use crate::net::atlas::*; use crate::net::codec::*; diff --git a/stackslib/src/util_lib/signed_structured_data.rs b/stackslib/src/util_lib/signed_structured_data.rs index ead99de5f21..7aadc403d6f 100644 --- a/stackslib/src/util_lib/signed_structured_data.rs +++ b/stackslib/src/util_lib/signed_structured_data.rs @@ -253,6 +253,7 @@ pub mod pox4 { body, None, |_, _| false, + None, ) .unwrap(); clarity_db diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 2bc98533be8..5cab6be2ada 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -744,11 +744,6 @@ impl BitcoinRegtestController { utxos_to_exclude: Option, block_height: u64, ) -> Option { - // if mock mining, do not even bother requesting UTXOs - if self.config.get_node_config(false).mock_mining { - return None; - } - let pubk = if 
self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { let mut p = *public_key; p.set_compressed(true); @@ -1693,6 +1688,11 @@ impl BitcoinRegtestController { // in RBF, you have to consume the same UTXOs utxos } else { + // if mock mining, do not even bother requesting UTXOs + if self.config.node.mock_mining { + return Err(BurnchainControllerError::NoUTXOs); + } + // Fetch some UTXOs let addr = self.get_miner_address(epoch_id, public_key); match self.get_utxos( diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 982912d99a2..d4c175ae01e 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -338,20 +338,6 @@ impl RewardSetEventPayload { static TEST_EVENT_OBSERVER_SKIP_RETRY: LazyLock> = LazyLock::new(TestFlag::default); impl EventObserver { - fn init_db(db_path: &str) -> Result { - let conn = Connection::open(db_path)?; - conn.execute( - "CREATE TABLE IF NOT EXISTS pending_payloads ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - url TEXT NOT NULL, - payload TEXT NOT NULL, - timeout INTEGER NOT NULL - )", - [], - )?; - Ok(conn) - } - fn insert_payload( conn: &Connection, url: &str, @@ -403,68 +389,17 @@ impl EventObserver { } } - fn get_pending_payloads( - conn: &Connection, - ) -> Result, db_error> { - let mut stmt = - conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads ORDER BY id")?; - let payload_iter = stmt.query_and_then( - [], - |row| -> Result<(i64, String, serde_json::Value, u64), db_error> { - let id: i64 = row.get(0)?; - let url: String = row.get(1)?; - let payload_text: String = row.get(2)?; - let payload: serde_json::Value = - serde_json::from_str(&payload_text).map_err(db_error::SerializationError)?; - let timeout_ms: u64 = row.get(3)?; - Ok((id, url, payload, timeout_ms)) - }, - )?; - payload_iter.collect() - } - fn delete_payload(conn: &Connection, id: i64) -> Result<(), db_error> { conn.execute("DELETE FROM pending_payloads WHERE id = ?1", params![id])?; Ok(()) } - fn process_pending_payloads(conn: &Connection) { - let pending_payloads = match Self::get_pending_payloads(conn) { - Ok(payloads) => payloads, - Err(e) => { - error!( - "Event observer: failed to retrieve pending payloads from database"; - "error" => ?e - ); - return; - } - }; - - for (id, url, payload, timeout_ms) in pending_payloads { - let timeout = Duration::from_millis(timeout_ms); - Self::send_payload_directly(&payload, &url, timeout, false); - - #[cfg(test)] - if TEST_EVENT_OBSERVER_SKIP_RETRY.get() { - warn!("Fault injection: delete_payload"); - return; - } - - if let Err(e) = Self::delete_payload(conn, id) { - error!( - "Event observer: failed to delete pending payload from database"; - "error" => ?e - ); - } - } - } - fn send_payload_directly( payload: &serde_json::Value, full_url: &str, timeout: Duration, disable_retries: bool, - ) { + ) -> bool { debug!( "Event dispatcher: Sending payload"; "url" => %full_url, "payload" => ?payload ); @@ -516,13 +451,13 @@ impl EventObserver { if disable_retries { warn!("Observer is configured in disable_retries mode: skipping retry of payload"); - return; + return false; } #[cfg(test)] if TEST_EVENT_OBSERVER_SKIP_RETRY.get() { warn!("Fault injection: skipping retry of payload"); - return; + return false; } sleep(backoff); @@ -533,28 +468,15 @@ impl EventObserver { ); attempts = attempts.saturating_add(1); } + true } fn new( - working_dir: Option, + db_path: Option, endpoint: String, timeout: Duration, disable_retries: bool, ) -> Self 
{ - let db_path = if let Some(mut db_path) = working_dir { - db_path.push("event_observers.sqlite"); - - Self::init_db( - db_path - .to_str() - .expect("Failed to convert chainstate path to string"), - ) - .expect("Failed to initialize database for event observer"); - Some(db_path) - } else { - None - }; - EventObserver { db_path, endpoint, @@ -565,7 +487,7 @@ impl EventObserver { /// Send the payload to the given URL. - /// Before sending this payload, any pending payloads in the database will be sent first. - pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { + /// If `id` is `Some`, the payload already exists in the pending-payloads table at that rowid; if `None`, it is inserted before the send is attempted. + pub fn send_payload(&self, payload: &serde_json::Value, path: &str, id: Option) { // Construct the full URL let url_str = if path.starts_with('/') { format!("{}{path}", &self.endpoint) @@ -581,11 +503,26 @@ impl EventObserver { let conn = Connection::open(db_path).expect("Failed to open database for event observer"); - // Insert the new payload into the database - Self::insert_payload_with_retry(&conn, &full_url, payload, self.timeout); + let id = match id { + Some(id) => id, + None => { + Self::insert_payload_with_retry(&conn, &full_url, payload, self.timeout); + conn.last_insert_rowid() + } + }; - // Process all pending payloads - Self::process_pending_payloads(&conn); + let success = Self::send_payload_directly(payload, &full_url, self.timeout, false); + // This is only `false` when the TestFlag is set to skip retries + if !success { + return; + } + + if let Err(e) = Self::delete_payload(&conn, id) { + error!( + "Event observer: failed to delete pending payload from database"; + "error" => ?e + ); + } } else { // No database, just send the payload Self::send_payload_directly(payload, &full_url, self.timeout, false); @@ -737,11 +674,11 @@ impl EventObserver { } fn send_new_attachments(&self, payload: &serde_json::Value) { - self.send_payload(payload, PATH_ATTACHMENT_PROCESSED); + self.send_payload(payload, PATH_ATTACHMENT_PROCESSED, None); } fn send_new_mempool_txs(&self, payload: &serde_json::Value) { - self.send_payload(payload, PATH_MEMPOOL_TX_SUBMIT); + self.send_payload(payload, PATH_MEMPOOL_TX_SUBMIT, None); } /// Serializes new microblocks data into a JSON payload and sends it off to the correct path @@ -773,31 +710,31 @@ impl EventObserver { "burn_block_timestamp": burn_block_timestamp, }); - self.send_payload(&payload, PATH_MICROBLOCK_SUBMIT); + self.send_payload(&payload, PATH_MICROBLOCK_SUBMIT, None); } fn send_dropped_mempool_txs(&self, payload: &serde_json::Value) { - self.send_payload(payload, PATH_MEMPOOL_TX_DROP); + self.send_payload(payload, PATH_MEMPOOL_TX_DROP, None); } fn send_mined_block(&self, payload: &serde_json::Value) { - self.send_payload(payload, PATH_MINED_BLOCK); + self.send_payload(payload, PATH_MINED_BLOCK, None); } fn send_mined_microblock(&self, payload: &serde_json::Value) { - self.send_payload(payload, PATH_MINED_MICROBLOCK); + self.send_payload(payload, PATH_MINED_MICROBLOCK, None); } fn send_mined_nakamoto_block(&self, payload: &serde_json::Value) { - self.send_payload(payload, PATH_MINED_NAKAMOTO_BLOCK); + self.send_payload(payload, PATH_MINED_NAKAMOTO_BLOCK, None); } pub fn send_stackerdb_chunks(&self, payload: &serde_json::Value) { - self.send_payload(payload, PATH_STACKERDB_CHUNKS); + self.send_payload(payload, PATH_STACKERDB_CHUNKS, None); } fn send_new_burn_block(&self, payload: &serde_json::Value) { - self.send_payload(payload, PATH_BURN_BLOCK_SUBMIT); + self.send_payload(payload, PATH_BURN_BLOCK_SUBMIT, None); } #[allow(clippy::too_many_arguments)] @@ -945,6 +882,8
@@ pub struct EventDispatcher { block_proposal_observers_lookup: HashSet, /// Channel for sending StackerDB events to the miner coordinator pub stackerdb_channel: Arc>, + /// Database path for pending payloads + db_path: Option, } /// This struct is used specifically for receiving proposal responses. @@ -966,7 +905,7 @@ impl ProposalCallbackReceiver for ProposalCallbackHandler { } }; for observer in self.observers.iter() { - observer.send_payload(&response, PATH_PROPOSAL_RESPONSE); + observer.send_payload(&response, PATH_PROPOSAL_RESPONSE, None); } } } @@ -1137,12 +1076,18 @@ impl BlockEventDispatcher for EventDispatcher { impl Default for EventDispatcher { fn default() -> Self { - EventDispatcher::new() + EventDispatcher::new(None) } } impl EventDispatcher { - pub fn new() -> EventDispatcher { + pub fn new(working_dir: Option) -> EventDispatcher { + let db_path = if let Some(mut db_path) = working_dir { + db_path.push("event_observers.sqlite"); + Some(db_path) + } else { + None + }; EventDispatcher { stackerdb_channel: Arc::new(Mutex::new(StackerDBChannel::new())), registered_observers: vec![], @@ -1157,6 +1102,7 @@ impl EventDispatcher { mined_microblocks_observers_lookup: HashSet::new(), stackerdb_observers_lookup: HashSet::new(), block_proposal_observers_lookup: HashSet::new(), + db_path, } } @@ -1370,7 +1316,11 @@ impl EventDispatcher { ); // Send payload - self.registered_observers[observer_id].send_payload(&payload, PATH_BLOCK_PROCESSED); + self.registered_observers[observer_id].send_payload( + &payload, + PATH_BLOCK_PROCESSED, + None, + ); } } } @@ -1682,10 +1632,10 @@ impl EventDispatcher { } } - pub fn register_observer(&mut self, conf: &EventObserverConfig, working_dir: PathBuf) { + pub fn register_observer(&mut self, conf: &EventObserverConfig) -> EventObserver { info!("Registering event observer at: {}", conf.endpoint); let event_observer = EventObserver::new( - Some(working_dir), + self.db_path.clone(), conf.endpoint.clone(), Duration::from_millis(conf.timeout_ms), conf.disable_retries, @@ -1757,7 +1707,119 @@ impl EventDispatcher { } } - self.registered_observers.push(event_observer); + self.registered_observers.push(event_observer.clone()); + + event_observer + } + + fn init_db(db_path: &PathBuf) -> Result { + let conn = Connection::open(db_path.to_str().unwrap())?; + conn.execute( + "CREATE TABLE IF NOT EXISTS pending_payloads ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + url TEXT NOT NULL, + payload TEXT NOT NULL, + timeout INTEGER NOT NULL + )", + [], + )?; + Ok(conn) + } + + fn get_pending_payloads( + conn: &Connection, + ) -> Result, db_error> { + let mut stmt = + conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads ORDER BY id")?; + let payload_iter = stmt.query_and_then( + [], + |row| -> Result<(i64, String, serde_json::Value, u64), db_error> { + let id: i64 = row.get(0)?; + let url: String = row.get(1)?; + let payload_text: String = row.get(2)?; + let payload: serde_json::Value = + serde_json::from_str(&payload_text).map_err(db_error::SerializationError)?; + let timeout_ms: u64 = row.get(3)?; + Ok((id, url, payload, timeout_ms)) + }, + )?; + payload_iter.collect() + } + + fn delete_payload(conn: &Connection, id: i64) -> Result<(), db_error> { + conn.execute("DELETE FROM pending_payloads WHERE id = ?1", params![id])?; + Ok(()) + } + + /// Process any pending payloads in the database. + /// This is called when the event dispatcher is first instantiated. 
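The replay routine defined below applies a matching rule worth calling out: a stored payload is replayed only to a currently registered observer whose endpoint has the same URL origin (scheme, host, and port), and payloads whose origin matches no observer are deleted rather than retried forever. A small sketch of just that predicate, using the `url` crate the patch already relies on (names here are illustrative):

```rust
use url::Url;

/// Does a stored payload URL belong to a registered observer endpoint?
/// Observer endpoints are stored as bare `host:port`, so an `http://`
/// scheme is prepended before comparing origins.
fn payload_matches_observer(stored_url: &str, observer_endpoint: &str) -> bool {
    let Ok(full) = Url::parse(stored_url) else {
        return false;
    };
    let Ok(endpoint) = Url::parse(&format!("http://{observer_endpoint}")) else {
        return false;
    };
    full.origin() == endpoint.origin()
}

fn main() {
    // Same origin, different path: replayed to that observer.
    assert!(payload_matches_observer(
        "http://127.0.0.1:3700/new_block",
        "127.0.0.1:3700"
    ));
    // No registered observer with this origin: the pending row is deleted.
    assert!(!payload_matches_observer(
        "http://different-domain.com/api",
        "127.0.0.1:3700"
    ));
}
```

(The real code panics on unparseable URLs instead of returning `false`; the lenient version is only to keep the sketch self-contained.)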
+ pub fn process_pending_payloads(&self) { + let Some(db_path) = &self.db_path else { + return; + }; + let conn = EventDispatcher::init_db(db_path).expect("Failed to initialize database"); + let pending_payloads = match Self::get_pending_payloads(&conn) { + Ok(payloads) => payloads, + Err(e) => { + error!( + "Event observer: failed to retrieve pending payloads from database"; + "error" => ?e + ); + return; + } + }; + + info!( + "Event dispatcher: processing {} pending payloads", + pending_payloads.len() + ); + + for (id, url, payload, _timeout_ms) in pending_payloads { + info!("Event dispatcher: processing pending payload: {url}"); + let full_url = Url::parse(url.as_str()) + .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {url} as a URL")); + // find the right observer + let observer = self.registered_observers.iter().find(|observer| { + let endpoint_url = Url::parse(format!("http://{}", &observer.endpoint).as_str()) + .unwrap_or_else(|_| { + panic!( + "Event dispatcher: unable to parse {} as a URL", + observer.endpoint + ) + }); + full_url.origin() == endpoint_url.origin() + }); + + let Some(observer) = observer else { + // This observer is no longer registered, skip and delete + info!( + "Event dispatcher: observer {} no longer registered, skipping", + url + ); + if let Err(e) = Self::delete_payload(&conn, id) { + error!( + "Event observer: failed to delete pending payload from database"; + "error" => ?e + ); + } + continue; + }; + + observer.send_payload(&payload, full_url.path(), Some(id)); + + #[cfg(test)] + if TEST_EVENT_OBSERVER_SKIP_RETRY.get() { + warn!("Fault injection: delete_payload"); + return; + } + + if let Err(e) = Self::delete_payload(&conn, id) { + error!( + "Event observer: failed to delete pending payload from database"; + "error" => ?e + ); + } + } } } @@ -1981,10 +2043,9 @@ mod test { fn test_init_db() { let dir = tempdir().unwrap(); let db_path = dir.path().join("test_init_db.sqlite"); - let db_path_str = db_path.to_str().unwrap(); // Call init_db - let conn_result = EventObserver::init_db(db_path_str); + let conn_result = EventDispatcher::init_db(&db_path); assert!(conn_result.is_ok(), "Failed to initialize the database"); // Check that the database file exists @@ -2005,9 +2066,8 @@ mod test { fn test_insert_and_get_pending_payloads() { let dir = tempdir().unwrap(); let db_path = dir.path().join("test_payloads.sqlite"); - let db_path_str = db_path.to_str().unwrap(); - let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + let conn = EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); let url = "http://example.com/api"; let payload = json!({"key": "value"}); @@ -2019,7 +2079,7 @@ mod test { // Get pending payloads let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 1, "Expected one pending payload"); let (_id, retrieved_url, retrieved_payload, timeout_ms) = &pending_payloads[0]; @@ -2036,9 +2096,8 @@ mod test { fn test_delete_payload() { let dir = tempdir().unwrap(); let db_path = dir.path().join("test_delete_payload.sqlite"); - let db_path_str = db_path.to_str().unwrap(); - let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + let conn = EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); let url = "http://example.com/api"; let payload = 
json!({"key": "value"}); @@ -2050,7 +2109,7 @@ mod test { // Get pending payloads let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 1, "Expected one pending payload"); let (id, _, _, _) = pending_payloads[0]; @@ -2061,7 +2120,7 @@ mod test { // Verify that the pending payloads list is empty let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); } @@ -2071,16 +2130,26 @@ mod test { use mockito::Matcher; let dir = tempdir().unwrap(); - let db_path = dir.path().join("test_process_payloads.sqlite"); - let db_path_str = db_path.to_str().unwrap(); + let db_path = dir.path().join("event_observers.sqlite"); + let mut server = mockito::Server::new(); + let endpoint = server.host_with_port(); + info!("endpoint: {}", endpoint); + let timeout = Duration::from_secs(5); - let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + let mut dispatcher = EventDispatcher::new(Some(dir.path().to_path_buf())); + + dispatcher.register_observer(&EventObserverConfig { + endpoint: endpoint.clone(), + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: timeout.as_millis() as u64, + disable_retries: false, + }); + + let conn = EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); let payload = json!({"key": "value"}); let timeout = Duration::from_secs(5); - // Create a mock server - let mut server = mockito::Server::new(); let _m = server .mock("POST", "/api") .match_header("content-type", Matcher::Regex("application/json.*".into())) @@ -2097,11 +2166,11 @@ mod test { .expect("Failed to insert payload"); // Process pending payloads - EventObserver::process_pending_payloads(&conn); + dispatcher.process_pending_payloads(); // Verify that the pending payloads list is empty let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); // Verify that the mock was called @@ -2109,24 +2178,78 @@ mod test { } #[test] - fn test_new_event_observer_with_db() { + fn pending_payloads_are_skipped_if_url_does_not_match() { let dir = tempdir().unwrap(); - let working_dir = dir.path().to_path_buf(); + let db_path = dir.path().join("event_observers.sqlite"); - let endpoint = "http://example.com".to_string(); + let mut server = mockito::Server::new(); + let endpoint = server.host_with_port(); let timeout = Duration::from_secs(5); + let mut dispatcher = EventDispatcher::new(Some(dir.path().to_path_buf())); - let observer = - EventObserver::new(Some(working_dir.clone()), endpoint.clone(), timeout, false); + dispatcher.register_observer(&EventObserverConfig { + endpoint: endpoint.clone(), + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: timeout.as_millis() as u64, + disable_retries: false, + }); - // Verify fields - assert_eq!(observer.endpoint, endpoint); - assert_eq!(observer.timeout, timeout); + let conn = EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); + + let payload = json!({"key": "value"}); + let timeout = 
Duration::from_secs(5); + + let mock = server + .mock("POST", "/api") + .match_header( + "content-type", + mockito::Matcher::Regex("application/json.*".into()), + ) + .match_body(mockito::Matcher::Json(payload.clone())) + .with_status(200) + .expect(0) // Expect 0 calls to this endpoint + .create(); + + // Use a different URL than the observer's endpoint + let url = "http://different-domain.com/api"; + + EventObserver::insert_payload(&conn, url, &payload, timeout) + .expect("Failed to insert payload"); + + dispatcher.process_pending_payloads(); + + let pending_payloads = + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + // Verify that the pending payload is no longer in the database, + // because this observer is no longer registered. + assert_eq!( + pending_payloads.len(), + 0, + "Expected payload to be removed from database since URL didn't match" + ); + + mock.assert(); + } + + #[test] + fn test_new_event_dispatcher_with_db() { + let dir = tempdir().unwrap(); + let working_dir = dir.path().to_path_buf(); + + let dispatcher = EventDispatcher::new(Some(working_dir.clone())); + + let expected_db_path = working_dir.join("event_observers.sqlite"); + assert_eq!(dispatcher.db_path, Some(expected_db_path.clone())); + + assert!( + !expected_db_path.exists(), + "Database file was created too soon" + ); + + EventDispatcher::init_db(&expected_db_path).expect("Failed to initialize the database"); // Verify that the database was initialized - let mut db_path = working_dir; - db_path.push("event_observers.sqlite"); - assert!(db_path.exists(), "Database file was not created"); + assert!(expected_db_path.exists(), "Database file was not created"); } #[test] @@ -2151,6 +2274,10 @@ mod test { let working_dir = dir.path().to_path_buf(); let payload = json!({"key": "value"}); + let dispatcher = EventDispatcher::new(Some(working_dir.clone())); + let db_path = dispatcher.clone().db_path.clone().unwrap(); + EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); + // Create a mock server let mut server = mockito::Server::new(); let _m = server @@ -2163,12 +2290,12 @@ mod test { let endpoint = server.url().strip_prefix("http://").unwrap().to_string(); let timeout = Duration::from_secs(5); - let observer = EventObserver::new(Some(working_dir), endpoint, timeout, false); + let observer = EventObserver::new(Some(db_path.clone()), endpoint, timeout, false); TEST_EVENT_OBSERVER_SKIP_RETRY.set(false); // Call send_payload - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Verify that the payload was sent and database is empty _m.assert(); @@ -2178,7 +2305,7 @@ mod test { let db_path_str = db_path.to_str().unwrap(); let conn = Connection::open(db_path_str).expect("Failed to open database"); let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); } @@ -2203,7 +2330,7 @@ mod test { let observer = EventObserver::new(None, endpoint, timeout, false); // Call send_payload - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Verify that the payload was sent _m.assert(); @@ -2240,7 +2367,7 @@ mod test { let payload = json!({"key": "value"}); - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Wait for the server to 
process the request rx.recv_timeout(Duration::from_secs(5)) @@ -2293,7 +2420,7 @@ mod test { let payload = json!({"key": "value"}); - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Wait for the server to process the request rx.recv_timeout(Duration::from_secs(5)) @@ -2344,7 +2471,7 @@ mod test { let start_time = Instant::now(); // Call the function being tested - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Record the time after the function returns let elapsed_time = start_time.elapsed(); @@ -2388,13 +2515,13 @@ mod test { attempt += 1; match attempt { 1 => { - debug!("Mock server received request attempt 1"); + info!("Mock server received request attempt 1"); // Do not reply, forcing the sender to timeout and retry, // but don't drop the request or it will receive a 500 error, _request_holder = Some(request); } 2 => { - debug!("Mock server received request attempt 2"); + info!("Mock server received request attempt 2"); // Verify the payload let mut payload = String::new(); @@ -2407,7 +2534,7 @@ mod test { request.respond(response).unwrap(); } 3 => { - debug!("Mock server received request attempt 3"); + info!("Mock server received request attempt 3"); // Verify the payload let mut payload = String::new(); @@ -2429,12 +2556,16 @@ mod test { } }); - let observer = EventObserver::new( - Some(working_dir), - format!("127.0.0.1:{port}"), - timeout, - false, - ); + let mut dispatcher = EventDispatcher::new(Some(working_dir.clone())); + + let observer = dispatcher.register_observer(&EventObserverConfig { + endpoint: format!("127.0.0.1:{port}"), + timeout_ms: timeout.as_millis() as u64, + events_keys: vec![EventKeyType::AnyEvent], + disable_retries: false, + }); + + EventDispatcher::init_db(&dispatcher.clone().db_path.unwrap()).unwrap(); let payload = json!({"key": "value"}); let payload2 = json!({"key": "value2"}); @@ -2446,15 +2577,17 @@ mod test { info!("Sending payload 1"); // Send the payload - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Re-enable retrying TEST_EVENT_OBSERVER_SKIP_RETRY.set(false); + dispatcher.process_pending_payloads(); + info!("Sending payload 2"); // Send another payload - observer.send_payload(&payload2, "/test"); + observer.send_payload(&payload2, "/test", None); // Wait for the server to process the requests rx.recv_timeout(Duration::from_secs(5)) @@ -2475,7 +2608,7 @@ mod test { let observer = EventObserver::new(None, endpoint, timeout, true); // in non "disable_retries" mode this will run forever - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Verify that the payload was sent _m.assert(); @@ -2491,7 +2624,7 @@ mod test { let observer = EventObserver::new(None, endpoint, timeout, true); // in non "disable_retries" mode this will run forever - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); } #[test] @@ -2501,14 +2634,14 @@ mod test { let dir = tempdir().unwrap(); let working_dir = dir.path().to_path_buf(); - let mut event_dispatcher = EventDispatcher::new(); + let mut event_dispatcher = EventDispatcher::new(Some(working_dir.clone())); let config = EventObserverConfig { endpoint: String::from("255.255.255.255"), events_keys: vec![EventKeyType::MinedBlocks], timeout_ms: 1000, disable_retries: true, }; - event_dispatcher.register_observer(&config, working_dir); + event_dispatcher.register_observer(&config); let nakamoto_block = 
NakamotoBlock { header: NakamotoBlockHeader::empty(), diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index b70913c581e..fc8617b5f04 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -10,7 +10,7 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::MinerStatus; -use stacks::config::MinerConfig; +use stacks::config::{BurnchainConfig, MinerConfig}; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; @@ -63,6 +63,8 @@ pub struct Globals { pub leader_key_registration_state: Arc>, /// Last miner config loaded last_miner_config: Arc>>, + /// Last burnchain config + last_burnchain_config: Arc>>, /// Last miner spend amount last_miner_spend_amount: Arc>>, /// burnchain height at which we start mining @@ -93,6 +95,7 @@ impl Clone for Globals { should_keep_running: self.should_keep_running.clone(), leader_key_registration_state: self.leader_key_registration_state.clone(), last_miner_config: self.last_miner_config.clone(), + last_burnchain_config: self.last_burnchain_config.clone(), last_miner_spend_amount: self.last_miner_spend_amount.clone(), start_mining_height: self.start_mining_height.clone(), estimated_winning_probs: self.estimated_winning_probs.clone(), @@ -125,6 +128,7 @@ impl Globals { should_keep_running, leader_key_registration_state: Arc::new(Mutex::new(leader_key_registration_state)), last_miner_config: Arc::new(Mutex::new(None)), + last_burnchain_config: Arc::new(Mutex::new(None)), last_miner_spend_amount: Arc::new(Mutex::new(None)), start_mining_height: Arc::new(Mutex::new(start_mining_height)), estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())), @@ -355,6 +359,28 @@ impl Globals { } } + /// Get the last burnchain config + pub fn get_last_burnchain_config(&self) -> Option { + match self.last_burnchain_config.lock() { + Ok(last_burnchain_config) => (*last_burnchain_config).clone(), + Err(_e) => { + error!("FATAL: failed to lock last burnchain config"); + panic!(); + } + } + } + + /// Set the last burnchain config + pub fn set_last_burnchain_config(&self, burnchain_config: BurnchainConfig) { + match self.last_burnchain_config.lock() { + Ok(ref mut last_burnchain_config) => **last_burnchain_config = Some(burnchain_config), + Err(_e) => { + error!("FATAL: failed to lock last burnchain config"); + panic!(); + } + } + } + /// Get the last miner spend amount pub fn get_last_miner_spend_amount(&self) -> Option { match self.last_miner_spend_amount.lock() { diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index 4e857508809..d69dfe63b87 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -118,14 +118,38 @@ impl Keychain { /// Generate a VRF proof over a given byte message.
/// `block_height` must be the _same_ block height called to make_vrf_keypair() - pub fn generate_proof(&self, block_height: u64, bytes: &[u8; 32]) -> VRFProof { + pub fn generate_proof(&self, block_height: u64, bytes: &[u8; 32]) -> Option { let (pk, sk) = self.make_vrf_keypair(block_height); - let proof = VRF::prove(&sk, bytes.as_ref()); + let Some(proof) = VRF::prove(&sk, bytes.as_ref()) else { + error!( + "Failed to generate proof with keypair, will be unable to mine."; + "block_height" => block_height, + "pk" => ?pk + ); + return None; + }; // Ensure that the proof is valid by verifying - let is_valid = VRF::verify(&pk, &proof, bytes.as_ref()).unwrap_or(false); - assert!(is_valid); - proof + let is_valid = VRF::verify(&pk, &proof, bytes.as_ref()) + .inspect_err(|e| { + error!( + "Failed to validate generated proof, will be unable to mine."; + "block_height" => block_height, + "pk" => ?pk, + "err" => %e, + ); + }) + .ok()?; + if !is_valid { + error!( + "Generated invalid proof, will be unable to mine."; + "block_height" => block_height, + "pk" => ?pk, + ); + None + } else { + Some(proof) + } } /// Generate a microblock signing key for this burnchain block height. @@ -367,7 +391,7 @@ mod tests { }; // Generate the proof - let proof = VRF::prove(vrf_sk, bytes.as_ref()); + let proof = VRF::prove(vrf_sk, bytes.as_ref())?; // Ensure that the proof is valid by verifying let is_valid = VRF::verify(vrf_pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index c49e0bbc731..b06dafbd023 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -143,6 +143,8 @@ pub enum Error { /// NetError wrapper #[error("NetError: {0}")] NetError(#[from] NetError), + #[error("Timed out waiting for signatures")] + SignatureTimeout, } impl StacksNode { diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 7155cf5966b..d94ae15e5ba 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -224,6 +224,8 @@ pub struct BlockMinerThread { burn_tip_at_start: ConsensusHash, /// flag to indicate an abort driven from the relayer abort_flag: Arc, + /// Should the nonce cache be reset before mining the next block? + reset_nonce_cache: bool, } impl BlockMinerThread { @@ -257,6 +259,7 @@ impl BlockMinerThread { abort_flag: Arc::new(AtomicBool::new(false)), tenure_cost: ExecutionCost::ZERO, tenure_budget: ExecutionCost::ZERO, + reset_nonce_cache: true, } } @@ -470,7 +473,7 @@ impl BlockMinerThread { }; error!("Error while gathering signatures: {e:?}. Will try mining again in {pause_ms}."; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); @@ -506,6 +509,14 @@ impl BlockMinerThread { } let new_block = loop { + if self.reset_nonce_cache { + let mut mem_pool = self + .config + .connect_mempool_db() + .expect("Database failure opening mempool"); + mem_pool.reset_mempool_caches()?; + } + // If we're mock mining, we may not have processed the block that the // actual tenure winner committed to yet. So, before attempting to // mock mine, check if the parent is processed.
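Before the miner-side hunks that consume it, note what the keychain change above means for callers: `generate_proof` no longer panics the node on a bad proof. `VRF::prove` failures, verification errors, and an invalid proof all surface as `None`, and the callers in `miner.rs`, `neon_node.rs`, and `node.rs` below log and skip that mining attempt. A rough sketch of the control flow, with `prove`/`verify` stubbed out since the real `VRF` API lives in `stacks_common`:

```rust
/// Sketch of the new fallible proof path: every failure yields None
/// instead of the old `assert!(is_valid)` panic.
fn generate_proof_checked(
    prove: impl Fn(&[u8]) -> Option<Vec<u8>>,
    verify: impl Fn(&[u8], &[u8]) -> Result<bool, String>,
    message: &[u8],
) -> Option<Vec<u8>> {
    // Proof generation itself can fail -> None (was: unchecked).
    let proof = prove(message)?;
    // Verification errors and invalid proofs also -> None (was: assert!).
    match verify(&proof, message) {
        Ok(true) => Some(proof),
        Ok(false) | Err(_) => None,
    }
}

fn main() {
    // Toy closures standing in for VRF::prove / VRF::verify.
    let prove = |msg: &[u8]| Some(msg.to_vec());
    let verify = |proof: &[u8], msg: &[u8]| Ok::<bool, String>(proof == msg);
    assert!(generate_proof_checked(prove, verify, b"sortition-hash").is_some());
}
```

Turning `None` into "skip this sortition" rather than an abort is the safer behavior for a long-running node: one bad keypair costs a tenure, not the process.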
@@ -550,6 +561,7 @@ impl BlockMinerThread { } info!("Miner interrupted while mining, will try again"); + // sleep, and try again. if the miner was interrupted because the burnchain // view changed, the next `mine_block()` invocation will error thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); @@ -557,6 +569,7 @@ impl BlockMinerThread { } Err(NakamotoNodeError::MiningFailure(ChainstateError::NoTransactionsToMine)) => { debug!("Miner did not find any transactions to mine"); + self.reset_nonce_cache = false; break None; } Err(e) => { @@ -583,7 +596,7 @@ impl BlockMinerThread { Err(e) => match e { NakamotoNodeError::StacksTipChanged => { info!("Stacks tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); @@ -591,7 +604,7 @@ impl BlockMinerThread { } NakamotoNodeError::BurnchainTipChanged => { info!("Burnchain tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); @@ -600,7 +613,7 @@ impl BlockMinerThread { NakamotoNodeError::StackerDBUploadError(ref ack) => { if ack.code == Some(StackerDBErrorCodes::BadSigner.code()) { error!("Error while gathering signatures: failed to upload miner StackerDB data: {ack:?}. Giving up."; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); @@ -624,7 +637,7 @@ impl BlockMinerThread { } else { info!( "Miner: Block signed by signer set and broadcasted"; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "stacks_block_hash" => %new_block.header.block_hash(), "stacks_block_id" => %new_block.header.block_id(), "block_height" => new_block.header.chain_length, @@ -1127,6 +1140,17 @@ impl BlockMinerThread { ) }; + let Some(vrf_proof) = vrf_proof else { + error!( + "Unable to generate VRF proof, will be unable to mine"; + "burn_block_sortition_hash" => %self.burn_election_block.sortition_hash, + "burn_block_block_height" => %self.burn_block.block_height, + "burn_block_hash" => %self.burn_block.burn_header_hash, + "vrf_pubkey" => &self.registered_key.vrf_public_key.to_hex() + ); + return None; + }; + debug!( "Generated VRF Proof: {} over {} ({},{}) with key {}", vrf_proof.to_hex(), @@ -1253,6 +1277,11 @@ impl BlockMinerThread { return Err(ChainstateError::MinerAborted.into()); } + // If we attempt to build a block, we should reset the nonce cache. + // In the special case where no transactions are found, this flag will + // be reset to false. 
+ self.reset_nonce_cache = true; + // build the block itself let mut block_metadata = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, @@ -1267,7 +1296,7 @@ impl BlockMinerThread { self.config .make_nakamoto_block_builder_settings(self.globals.get_miner_status()), // we'll invoke the event dispatcher ourselves so that it calculates the - // correct signer_sighash for `process_mined_nakamoto_block_event` + // correct signer_signature_hash for `process_mined_nakamoto_block_event` Some(&self.event_dispatcher), signer_bitvec_len.unwrap_or(0), ) @@ -1301,7 +1330,7 @@ impl BlockMinerThread { block_metadata.block.header.chain_length, block_metadata.block.header.block_hash(), block_metadata.block.txs.len(); - "signer_sighash" => %block_metadata.block.header.signer_signature_hash(), + "signer_signature_hash" => %block_metadata.block.header.signer_signature_hash(), "consensus_hash" => %block_metadata.block.header.consensus_hash, "parent_block_id" => %block_metadata.block.header.parent_block_id, "timestamp" => block_metadata.block.header.timestamp, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 60ec5f73796..ddccce2deed 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -42,6 +42,7 @@ use stacks::chainstate::stacks::miner::{ set_mining_spend_amount, signal_mining_blocked, signal_mining_ready, }; use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::config::BurnchainConfig; use stacks::core::mempool::MemPoolDB; use stacks::core::STACKS_EPOCH_3_1_MARKER; use stacks::monitoring::increment_stx_blocks_mined_counter; @@ -1101,29 +1102,7 @@ impl RelayerThread { return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); }; - let burnchain_config = self.config.get_burnchain_config(); - let last_miner_spend_opt = self.globals.get_last_miner_spend_amount(); - let force_remine = if let Some(last_miner_spend_amount) = last_miner_spend_opt { - last_miner_spend_amount != burnchain_config.burn_fee_cap - } else { - false - }; - if force_remine { - info!( - "Miner config changed; updating spend amount {}", - burnchain_config.burn_fee_cap - ); - } - - self.globals - .set_last_miner_spend_amount(burnchain_config.burn_fee_cap); - - set_mining_spend_amount( - self.globals.get_miner_status(), - burnchain_config.burn_fee_cap, - ); - // amount of burnchain tokens (e.g. 
sats) we'll spend across the PoX outputs - let burn_fee_cap = burnchain_config.burn_fee_cap; + let (_, burnchain_config) = self.check_burnchain_config_changed(); // let's commit, but target the current burnchain tip with our modulus so the commit is // only valid if it lands in the targeted burnchain block height @@ -1155,7 +1134,7 @@ impl RelayerThread { highest_tenure_start_block_header.index_block_hash().0, ), // the rest of this is the same as epoch2x commits, modulo the new epoch marker - burn_fee: burn_fee_cap, + burn_fee: burnchain_config.burn_fee_cap, apparent_sender: sender, key_block_ptr: u32::try_from(key.block_height) .expect("FATAL: burn block height exceeded u32"), @@ -1703,9 +1682,11 @@ impl RelayerThread { // update local state last_committed.set_txid(&txid); - self.globals - .counters - .bump_naka_submitted_commits(last_committed.burn_tip.block_height, tip_height); + self.globals.counters.bump_naka_submitted_commits( + last_committed.burn_tip.block_height, + tip_height, + last_committed.block_commit.burn_fee, + ); self.last_committed = Some(last_committed); Ok(()) @@ -1768,6 +1749,21 @@ impl RelayerThread { "burnchain view changed?" => %burnchain_changed, "highest tenure changed?" => %highest_tenure_changed); + // If the miner spend or config has changed, we want to RBF with new config values. + let (burnchain_config_changed, _) = self.check_burnchain_config_changed(); + let miner_config_changed = self.check_miner_config_changed(); + + if burnchain_config_changed || miner_config_changed { + info!("Miner spend or config changed; issuing block commit with new values"; + "miner_spend_changed" => %burnchain_config_changed, + "miner_config_changed" => %miner_config_changed, + ); + return Ok(Some(RelayerDirective::IssueBlockCommit( + stacks_tip_ch, + stacks_tip_bh, + ))); + } + if !burnchain_changed && !highest_tenure_changed { // nothing to do return Ok(None); @@ -2136,6 +2132,45 @@ impl RelayerThread { debug!("Relayer: handled directive"; "continue_running" => continue_running); continue_running } + + /// Reload the burnchain config to see if it has changed; refresh the cached config and + /// miner spend amount either way. Returns whether it changed, plus the freshly loaded config.
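Both change-detection helpers defined next share one idiom: compare the freshly loaded config against a cached copy, refresh the cache unconditionally, and report a change only when a cached value existed and differed, so the very first observation never triggers a re-commit. A generic sketch of that idiom (illustrative, not the patch's code):

```rust
/// Sketch of the compare-and-refresh idiom used by both
/// check_burnchain_config_changed and check_miner_config_changed:
/// report a change only when a cached copy exists and differs,
/// and refresh the cache unconditionally.
fn refresh_and_compare<T: PartialEq + Clone>(current: &T, cache: &mut Option<T>) -> bool {
    let changed = cache.as_ref().is_some_and(|last| last != current);
    *cache = Some(current.clone());
    changed
}

fn main() {
    let mut cache: Option<u64> = None;
    // First observation: nothing cached yet, so no "change" is reported.
    assert!(!refresh_and_compare(&20_000u64, &mut cache));
    // Same value again: unchanged.
    assert!(!refresh_and_compare(&20_000u64, &mut cache));
    // burn_fee_cap bumped in the config file: reported as changed, which
    // the relayer turns into a re-issued (RBF'd) block commit above.
    assert!(refresh_and_compare(&30_000u64, &mut cache));
}
```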
+ pub fn check_burnchain_config_changed(&self) -> (bool, BurnchainConfig) { + let burnchain_config = self.config.get_burnchain_config(); + let last_burnchain_config_opt = self.globals.get_last_burnchain_config(); + let burnchain_config_changed = + if let Some(last_burnchain_config) = last_burnchain_config_opt { + last_burnchain_config != burnchain_config + } else { + false + }; + + self.globals + .set_last_miner_spend_amount(burnchain_config.burn_fee_cap); + self.globals + .set_last_burnchain_config(burnchain_config.clone()); + + set_mining_spend_amount( + self.globals.get_miner_status(), + burnchain_config.burn_fee_cap, + ); + + (burnchain_config_changed, burnchain_config) + } + + pub fn check_miner_config_changed(&self) -> bool { + let miner_config = self.config.get_miner_config(); + let last_miner_config_opt = self.globals.get_last_miner_config(); + let miner_config_changed = if let Some(last_miner_config) = last_miner_config_opt { + last_miner_config != miner_config + } else { + false + }; + + self.globals.set_last_miner_config(miner_config); + + miner_config_changed + } } #[cfg(test)] diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 25c0421e83f..4e6482c25af 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -254,46 +254,57 @@ impl SignerCoordinator { }; let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); - debug!("Sending block proposal message to signers"; - "signer_signature_hash" => %block.header.signer_signature_hash(), - ); - Self::send_miners_message::( - &self.message_key, - sortdb, - election_sortition, - stackerdbs, - block_proposal_message, - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - &election_sortition.consensus_hash, - )?; - counters.bump_naka_proposed_blocks(); - #[cfg(test)] - { - info!( - "SignerCoordinator: sent block proposal to .miners, waiting for test signing channel" + loop { + debug!("Sending block proposal message to signers"; + "signer_signature_hash" => %block.header.signer_signature_hash(), ); - // In test mode, short-circuit waiting for the signers if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. - if let Some(signatures) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + Self::send_miners_message::( + &self.message_key, + sortdb, + election_sortition, + stackerdbs, + block_proposal_message.clone(), + MinerSlotID::BlockProposal, + self.is_mainnet, + &mut self.miners_session, + &election_sortition.consensus_hash, + )?; + counters.bump_naka_proposed_blocks(); + + #[cfg(test)] { - debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(signatures); + info!( + "SignerCoordinator: sent block proposal to .miners, waiting for test signing channel" + ); + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. 
+ if let Some(signatures) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(signatures); + } } - } - self.get_block_status( - &block.header.signer_signature_hash(), - &block.block_id(), - block.header.parent_block_id, - chain_state, - sortdb, - counters, - ) + let res = self.get_block_status( + &block.header.signer_signature_hash(), + &block.block_id(), + block.header.parent_block_id, + chain_state, + sortdb, + counters, + ); + + match res { + Err(NakamotoNodeError::SignatureTimeout) => { + info!("Block proposal signing process timed out, resending the same proposal"); + continue; + } + _ => return res, + } + } } /// Get the block status for a given block hash. @@ -340,7 +351,7 @@ impl SignerCoordinator { if rejections_timer.elapsed() > *rejections_timeout { return false; } - // number or rejections changed? + // number of rejections changed? if status.total_weight_rejected != rejections { return false; } @@ -353,7 +364,7 @@ impl SignerCoordinator { // If we just received a timeout, we should check if the burnchain // tip has changed or if we received this signed block already in // the staging db. - debug!("SignerCoordinator: Timeout waiting for block signatures"); + debug!("SignerCoordinator: Intermediate timeout waiting for block status"); // Look in the nakamoto staging db -- a block can only get stored there // if it has enough signing weight to clear the threshold. @@ -364,7 +375,7 @@ impl SignerCoordinator { warn!( "Failed to query chainstate for block: {e:?}"; "block_id" => %block_id, - "block_signer_sighash" => %block_signer_sighash, + "signer_signature_hash" => %block_signer_sighash, ); e }) @@ -380,15 +391,17 @@ impl SignerCoordinator { } if rejections_timer.elapsed() > *rejections_timeout { - warn!("Timed out while waiting for responses from signers"; - "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.as_secs(), - "rejections" => rejections, - "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + warn!("Timed out while waiting for responses from signers, resending proposal"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Timed out while waiting for signatures".into(), - )); + + // Reset the rejections in the stackerdb listener + self.stackerdb_comms.reset_rejections(block_signer_sighash); + + return Err(NakamotoNodeError::SignatureTimeout); } // Check if a new Stacks block has arrived in the parent tenure @@ -399,7 +412,7 @@ impl SignerCoordinator { )? 
.ok_or(NakamotoNodeError::UnexpectedChainState)?; if highest_in_tenure.index_block_hash() != parent_block_id { - debug!("SignCoordinator: Exiting due to new stacks tip"); + info!("SignerCoordinator: Exiting due to new stacks tip"); return Err(NakamotoNodeError::StacksTipChanged); } @@ -437,25 +450,27 @@ impl SignerCoordinator { info!( "{}/{} signers vote to reject block", block_status.total_weight_rejected, self.total_weight; - "block_signer_sighash" => %block_signer_sighash, + "signer_signature_hash" => %block_signer_sighash, ); counters.bump_naka_rejected_blocks(); return Err(NakamotoNodeError::SignersRejected); } else if block_status.total_weight_approved >= self.weight_threshold { info!("Received enough signatures, block accepted"; - "block_signer_sighash" => %block_signer_sighash, + "signer_signature_hash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); } else if rejections_timer.elapsed() > *rejections_timeout { warn!("Timed out while waiting for responses from signers"; - "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.as_secs(), - "rejections" => rejections, - "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Timed out while waiting for signatures".into(), - )); + + // Reset the rejections in the stackerdb listener + self.stackerdb_comms.reset_rejections(block_signer_sighash); + + return Err(NakamotoNodeError::SignatureTimeout); } else { continue; } diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index c05e221610a..ef2d6bdec99 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -52,9 +52,13 @@ pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); #[derive(Debug, Clone)] pub struct BlockStatus { - pub responded_signers: HashSet, + /// Set of the slot ids of signers who have responded + pub responded_signers: HashSet, + /// Map of the slot id of signers who have signed the block and their signature pub gathered_signatures: BTreeMap, + /// Total weight of signers who have signed the block pub total_weight_approved: u32, + /// Total weight of signers who have rejected the block pub total_weight_rejected: u32, } @@ -281,7 +285,7 @@ impl StackerDBListener { info!( "StackerDBListener: Received signature for block that we did not request. Ignoring."; "signature" => %signature, - "block_signer_sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "slot_id" => slot_id, "signer_set" => self.signer_set, ); @@ -299,7 +303,7 @@ impl StackerDBListener { warn!( "StackerDBListener: Processed signature but didn't validate over the expected block. 
Ignoring"; "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "slot_id" => slot_id, ); continue; @@ -307,7 +311,7 @@ impl StackerDBListener { if Self::fault_injection_ignore_signatures() { warn!("StackerDBListener: fault injection: ignoring well-formed signature for block"; - "block_signer_sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, "signature" => %signature, @@ -328,7 +332,7 @@ impl StackerDBListener { } info!("StackerDBListener: Signature Added to block"; - "block_signer_sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, "signature" => %signature, @@ -342,7 +346,7 @@ impl StackerDBListener { "server_version" => metadata.server_version, ); block.gathered_signatures.insert(slot_id, signature); - block.responded_signers.insert(signer_pubkey); + block.responded_signers.insert(slot_id); if block.total_weight_approved >= self.weight_threshold { // Signal to anyone waiting on this block that we have enough signatures @@ -364,7 +368,7 @@ impl StackerDBListener { else { info!( "StackerDBListener: Received rejection for block that we did not request. Ignoring."; - "block_signer_sighash" => %rejected_data.signer_signature_hash, + "signer_signature_hash" => %rejected_data.signer_signature_hash, "slot_id" => slot_id, "signer_set" => self.signer_set, ); @@ -384,14 +388,16 @@ impl StackerDBListener { continue; } }; - block.responded_signers.insert(rejected_pubkey); - block.total_weight_rejected = block - .total_weight_rejected - .checked_add(signer_entry.weight) - .expect("FATAL: total weight rejected exceeds u32::MAX"); + + if block.responded_signers.insert(slot_id) { + block.total_weight_rejected = block + .total_weight_rejected + .checked_add(signer_entry.weight) + .expect("FATAL: total weight rejected exceeds u32::MAX"); + } info!("StackerDBListener: Signer rejected block"; - "block_signer_sighash" => %rejected_data.signer_signature_hash, + "signer_signature_hash" => %rejected_data.signer_signature_hash, "signer_pubkey" => rejected_pubkey.to_hex(), "signer_slot_id" => slot_id, "signature" => %rejected_data.signature, @@ -434,6 +440,9 @@ impl StackerDBListener { | SignerMessageV0::MockBlock(_) => { debug!("Received mock message. Ignoring."); } + SignerMessageV0::StateMachineUpdate(_) => { + debug!("Received state machine update message. Ignoring."); + } }; } } @@ -496,6 +505,25 @@ impl StackerDBListenerComms { blocks.insert(block.signer_signature_hash(), block_status); } + /// Reset rejections for a block proposal. + /// This is used when a block proposal times out and we need to retry it by + /// clearing the block's rejections. Block approvals cannot be cleared + /// because an old approval could always be used to make a block reach + /// the approval threshold. + pub fn reset_rejections(&self, signer_sighash: &Sha512Trunc256Sum) { + let (lock, _cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + if let Some(block) = blocks.get_mut(signer_sighash) { + block.responded_signers.clear(); + block.total_weight_rejected = 0; + + // Add approving signers back to the responded signers set + for (slot_id, _) in block.gathered_signatures.iter() { + block.responded_signers.insert(*slot_id); + } + } + } + /// Get the status for `block` from the Stacker DB listener. 
/// If the block is not found in the map, return an error. /// If the block is found, call `condition` to check if the block status diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 3b970dc4d45..6cba1f9c837 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1730,6 +1730,17 @@ impl BlockMinerThread { ) }; + let Some(vrf_proof) = vrf_proof else { + error!( + "Unable to generate VRF proof, will be unable to mine"; + "burn_block_sortition_hash" => %self.burn_block.sortition_hash, + "burn_block_block_height" => %self.burn_block.block_height, + "burn_block_hash" => %self.burn_block.burn_header_hash, + "vrf_pubkey" => &self.registered_key.vrf_public_key.to_hex() + ); + return None; + }; + debug!( "Generated VRF Proof: {} over {} ({},{}) with key {}", vrf_proof.to_hex(), diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 146441d2ae4..8abe84a7d51 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -337,11 +337,12 @@ impl Node { ) .expect("FATAL: failed to initiate mempool"); - let mut event_dispatcher = EventDispatcher::new(); + let mut event_dispatcher = EventDispatcher::new(Some(config.get_working_dir())); for observer in &config.events_observers { - event_dispatcher.register_observer(observer, config.get_working_dir()); + event_dispatcher.register_observer(observer); } + event_dispatcher.process_pending_payloads(); let burnchain_config = config.get_burnchain(); @@ -658,10 +659,13 @@ impl Node { .expect("FATAL: failed to query canonical burn chain tip"); // Generates a proof out of the sortition hash provided in the params. - let vrf_proof = self.keychain.generate_proof( + let Some(vrf_proof) = self.keychain.generate_proof( registered_key.target_block_height, tip.sortition_hash.as_bytes(), - ); + ) else { + warn!("Failed to generate VRF proof, will be unable to initiate new tenure"); + return None; + }; // Generates a new secret key for signing the trail of microblocks // of the upcoming tenure. @@ -730,10 +734,13 @@ impl Node { if self.active_registered_key.is_some() { let registered_key = self.active_registered_key.clone().unwrap(); - let vrf_proof = self.keychain.generate_proof( + let Some(vrf_proof) = self.keychain.generate_proof( registered_key.target_block_height, burnchain_tip.block_snapshot.sortition_hash.as_bytes(), - ); + ) else { + warn!("Failed to generate VRF proof, will be unable to mine commits"); + return; + }; let op = self.generate_block_commit_op( anchored_block_from_ongoing_tenure.header.block_hash(), diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 335fb325d8a..beffb7c8956 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -91,10 +91,11 @@ impl RunLoop { config.burnchain.burn_fee_cap, ))); - let mut event_dispatcher = EventDispatcher::new(); + let mut event_dispatcher = EventDispatcher::new(Some(config.get_working_dir())); for observer in config.events_observers.iter() { - event_dispatcher.register_observer(observer, config.get_working_dir()); + event_dispatcher.register_observer(observer); } + event_dispatcher.process_pending_payloads(); Self { config, @@ -401,7 +402,7 @@ impl RunLoop { /// This function will block by looping infinitely. 
 /// It will start the burnchain (separate thread), set up a channel in
 /// charge of coordinating the new blocks coming from the burnchain and
-/// the nodes, taking turns on tenures.
+/// the nodes, taking turns on tenures.
     pub fn start(
         &mut self,
         burnchain_opt: Option<Burnchain>,
diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs
index 299335f35f8..f5c045b6a5c 100644
--- a/testnet/stacks-node/src/run_loop/neon.rs
+++ b/testnet/stacks-node/src/run_loop/neon.rs
@@ -117,6 +117,7 @@ pub struct Counters {
     pub naka_signer_pushed_blocks: RunLoopCounter,
     pub naka_miner_directives: RunLoopCounter,
     pub naka_submitted_commit_last_stacks_tip: RunLoopCounter,
+    pub naka_submitted_commit_last_commit_amount: RunLoopCounter,

     pub naka_miner_current_rejections: RunLoopCounter,
     pub naka_miner_current_rejections_timeout_secs: RunLoopCounter,
@@ -178,6 +179,7 @@ impl Counters {
         &self,
         committed_burn_height: u64,
         committed_stacks_height: u64,
+        committed_sats_amount: u64,
     ) {
         Counters::inc(&self.naka_submitted_commits);
         Counters::set(
@@ -188,6 +190,10 @@
             &self.naka_submitted_commit_last_stacks_tip,
             committed_stacks_height,
         );
+        Counters::set(
+            &self.naka_submitted_commit_last_commit_amount,
+            committed_sats_amount,
+        );
     }

     pub fn bump_naka_mined_blocks(&self) {
@@ -274,10 +280,11 @@ impl RunLoop {
             config.burnchain.burn_fee_cap,
         )));

-        let mut event_dispatcher = EventDispatcher::new();
+        let mut event_dispatcher = EventDispatcher::new(Some(config.get_working_dir()));
         for observer in config.events_observers.iter() {
-            event_dispatcher.register_observer(observer, config.get_working_dir());
+            event_dispatcher.register_observer(observer);
         }
+        event_dispatcher.process_pending_payloads();

         Self {
             config,
diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs
index 7462acd9637..a6b47ccc518 100644
--- a/testnet/stacks-node/src/tests/epoch_205.rs
+++ b/testnet/stacks-node/src/tests/epoch_205.rs
@@ -1,5 +1,4 @@
 use std::collections::HashMap;
-use std::sync::atomic::Ordering;
 use std::{env, thread};

 use clarity::vm::costs::ExecutionCost;
@@ -14,6 +13,9 @@ use stacks::chainstate::stacks::{
     StacksBlockHeader, StacksPrivateKey, StacksTransaction, TransactionPayload,
 };
 use stacks::config::{EventKeyType, InitialBalance};
+use stacks::core::test_util::{
+    make_contract_call, make_contract_call_mblock_only, make_contract_publish, to_addr,
+};
 use stacks::core::{
     self, EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0,
     PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1,
@@ -21,15 +23,10 @@
 use stacks_common::codec::StacksMessageCodec;
 use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed};
 use stacks_common::util::hash::hex_bytes;
-use stacks_common::util::sleep_ms;

 use crate::tests::bitcoin_regtest::BitcoinCoreController;
 use crate::tests::neon_integrations::*;
-use crate::tests::{
-    make_contract_call, make_contract_call_mblock_only, make_contract_publish,
-    make_contract_publish_microblock_only, run_until_burnchain_height, select_transactions_where,
-    to_addr,
-};
+use crate::tests::{run_until_burnchain_height, select_transactions_where};
 use crate::{neon, BitcoinRegtestController, BurnchainController, Keychain};

 #[test]
@@ -899,349 +896,3 @@ fn test_cost_limit_switch_version205() {

     channel.stop_chains_coordinator();
 }
-
-// mine a stream of microblocks, and verify that microblock streams can get bigger after the epoch
-// transition
-#[test]
-#[ignore]
-fn bigger_microblock_streams_in_2_05() {
-    if env::var("BITCOIND_TEST") != Ok("1".into()) {
-        return;
-    }
-
-    let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect();
-    let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect();
-
-    let (mut conf, miner_account) = neon_integration_test_conf();
-
-    for spender_addr in spender_addrs.iter() {
-        conf.initial_balances.push(InitialBalance {
-            address: spender_addr.clone(),
-            amount: 10492300000,
-        });
-    }
-
-    conf.node.mine_microblocks = true;
-    conf.node.wait_time_for_microblocks = 0;
-    conf.node.microblock_frequency = 0;
-    conf.node.max_microblocks = 65536;
-    conf.burnchain.max_rbf = 1000000;
-
-    conf.miner.first_attempt_time_ms = i64::MAX as u64;
-    conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
-
-    conf.burnchain.epochs = Some(EpochList::new(&[
-        StacksEpoch {
-            epoch_id: StacksEpochId::Epoch20,
-            start_height: 0,
-            end_height: 206,
-            block_limit: ExecutionCost {
-                write_length: 15000000,
-                write_count: 7750,
-                read_length: 100000000,
-                read_count: 7750,
-                runtime: 5000000000,
-            },
-            network_epoch: PEER_VERSION_EPOCH_2_0,
-        },
-        StacksEpoch {
-            epoch_id: StacksEpochId::Epoch2_05,
-            start_height: 206,
-            end_height: 10_002,
-            block_limit: ExecutionCost {
-                write_length: 15000000,
-                write_count: 7750 * 2,
-                read_length: 100000000,
-                read_count: 7750 * 2,
-                runtime: 5000000000,
-            },
-            network_epoch: PEER_VERSION_EPOCH_2_05,
-        },
-        StacksEpoch {
-            epoch_id: StacksEpochId::Epoch21,
-            start_height: 10_002,
-            end_height: 9223372036854775807,
-            block_limit: ExecutionCost {
-                write_length: 15000000,
-                write_count: 7750 * 2,
-                read_length: 100000000,
-                read_count: 7750 * 2,
-                runtime: 5000000000,
-            },
-            network_epoch: PEER_VERSION_EPOCH_2_05,
-        },
-    ]));
-    conf.burnchain.pox_2_activation = Some(10_003);
-
-    let txs: Vec<Vec<u8>> = spender_sks
-        .iter()
-        .enumerate()
-        .map(|(ix, spender_sk)| {
-            // almost fills a whole block
-            make_contract_publish_microblock_only(
-                spender_sk,
-                0,
-                1049230,
-                conf.burnchain.chain_id,
-                &format!("large-{ix}"),
-                &format!("
-                ;; a single one of these transactions consumes over half the runtime budget
-                (define-constant BUFF_TO_BYTE (list
-                   0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f
-                   0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f
-                   0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f
-                   0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f
-                   0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f
-                   0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f
-                   0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f
-                   0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f
-                   0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f
-                   0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f
-                   0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf
-                   0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf
-                   0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf
-                   0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf
-                   0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef
-                   0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff
-                ))
-                (define-private (crash-me-folder (input (buff 1)) (ctr uint))
-                    (begin
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (+ u1 ctr)
-                    )
-                )
-                (define-public (crash-me (name (string-ascii 128)))
-                    (begin
-                        (fold crash-me-folder BUFF_TO_BYTE u0)
-                        (print name)
-                        (ok u0)
-                    )
-                )
-                (begin
-                    (crash-me \"large-contract-{ix}\"))
-                "
-                )
-            )
-        })
-        .collect();
-
-    test_observer::spawn();
-    test_observer::register_any(&mut conf);
-
-    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
-    btcd_controller
-        .start_bitcoind()
-        .map_err(|_e| ())
-        .expect("Failed starting bitcoind");
-
-    let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
-    let http_origin = format!("http://{}", &conf.node.rpc_bind);
-
-    btc_regtest_controller.bootstrap_chain(201);
-
-    eprintln!("Chain bootstrapped...");
-
-    let mut run_loop = neon::RunLoop::new(conf);
-    let blocks_processed = run_loop.get_blocks_processed_arc();
-    let microblocks_processed = run_loop.get_microblocks_processed_arc();
-
-    let channel = run_loop.get_coordinator_channel().unwrap();
-
-    thread::spawn(move || run_loop.start(None, 0));
-
-    // give the run loop some time to start up!
-    wait_for_runloop(&blocks_processed);
-
-    // zeroth block wakes up the run loop
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // first block will hold our VRF registration
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // second block will be the first mined Stacks block
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // let's query the miner's account nonce:
-    let account = get_account(&http_origin, &miner_account);
-    assert_eq!(account.nonce, 1);
-    assert_eq!(account.balance, 0);
-
-    for spender_addr in spender_addrs.iter() {
-        let account = get_account(&http_origin, &spender_addr);
-        assert_eq!(account.nonce, 0);
-        assert_eq!(account.balance, 10492300000);
-    }
-
-    let mut ctr = 0;
-    while ctr < txs.len() {
-        submit_tx(&http_origin, &txs[ctr]);
-        if !wait_for_microblocks(&microblocks_processed, 30) {
-            // we time out if we *can't* mine any more microblocks
-            break;
-        }
-        ctr += 1;
-    }
-    microblocks_processed.store(0, Ordering::SeqCst);
-
-    // only one fit
-    assert_eq!(ctr, 1);
-    sleep_ms(5_000);
-
-    // confirm it
-    eprintln!("confirm epoch 2.0 microblock stream");
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // send the rest of the transactions
-    while ctr < txs.len() {
-        submit_tx(&http_origin, &txs[ctr]);
-        ctr += 1;
-    }
-
-    eprintln!("expect epoch transition");
-
-    microblocks_processed.store(0, Ordering::SeqCst);
-
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    // don't bother waiting for a microblock stream
-
-    eprintln!("expect epoch 2.05 microblock stream");
-
-    microblocks_processed.store(0, Ordering::SeqCst);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    wait_for_microblocks(&microblocks_processed, 180);
-
-    microblocks_processed.store(0, Ordering::SeqCst);
-
-    // this test can sometimes miss a mine block event.
- sleep_ms(120_000); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let mut epoch_20_stream_cost = ExecutionCost::ZERO; - let mut epoch_205_stream_cost = ExecutionCost::ZERO; - - // max == largest number of transactions per stream in a given epoch (2.0 or 2.05) - // total == number of transactions across all streams in a given epoch (2.0 or 2.05) - let mut max_big_txs_per_microblock_20 = 0; - let mut total_big_txs_per_microblock_20 = 0; - - let mut max_big_txs_per_microblock_205 = 0; - let mut total_big_txs_per_microblock_205 = 0; - - let mut in_205; - let mut have_confirmed_205_stream; - - for i in 0..10 { - let blocks = test_observer::get_blocks(); - - max_big_txs_per_microblock_20 = 0; - total_big_txs_per_microblock_20 = 0; - - max_big_txs_per_microblock_205 = 0; - total_big_txs_per_microblock_205 = 0; - - in_205 = false; - have_confirmed_205_stream = false; - - // NOTE: this only counts the number of txs per stream, not in each microblock - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); - - let mut num_big_microblock_txs = 0; - let mut total_execution_cost = ExecutionCost::ZERO; - - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().contains("costs-2") { - in_205 = true; - } else if tsc.name.to_string().contains("large") { - num_big_microblock_txs += 1; - if in_205 { - total_big_txs_per_microblock_205 += 1; - } else { - total_big_txs_per_microblock_20 += 1; - } - } - } - let execution_cost = tx.get("execution_cost").unwrap(); - total_execution_cost.read_count += - execution_cost.get("read_count").unwrap().as_i64().unwrap() as u64; - total_execution_cost.read_length += - execution_cost.get("read_length").unwrap().as_i64().unwrap() as u64; - total_execution_cost.write_count += - execution_cost.get("write_count").unwrap().as_i64().unwrap() as u64; - total_execution_cost.write_length += execution_cost - .get("write_length") - .unwrap() - .as_i64() - .unwrap() as u64; - total_execution_cost.runtime += - execution_cost.get("runtime").unwrap().as_i64().unwrap() as u64; - } - if in_205 && num_big_microblock_txs > max_big_txs_per_microblock_205 { - max_big_txs_per_microblock_205 = num_big_microblock_txs; - } - if !in_205 && num_big_microblock_txs > max_big_txs_per_microblock_20 { - max_big_txs_per_microblock_20 = num_big_microblock_txs; - } - - eprintln!("Epoch size: {total_execution_cost:?}"); - - if !in_205 && total_execution_cost.exceeds(&epoch_20_stream_cost) { - epoch_20_stream_cost = total_execution_cost; - break; - } - if in_205 && total_execution_cost.exceeds(&ExecutionCost::ZERO) { - have_confirmed_205_stream = true; - epoch_205_stream_cost = total_execution_cost; - break; - } - } - - if have_confirmed_205_stream { - break; - } else { - eprintln!("Trying to confirm a stream again (attempt {})", i + 1); - sleep_ms((i + 2) * 60_000); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - } - } - - eprintln!( - "max_big_txs_per_microblock_20: {max_big_txs_per_microblock_20}, total_big_txs_per_microblock_20: {total_big_txs_per_microblock_20}" - ); - eprintln!( - "max_big_txs_per_microblock_205: {max_big_txs_per_microblock_205}, 
total_big_txs_per_microblock_205: {total_big_txs_per_microblock_205}" - ); - eprintln!("confirmed stream execution in 2.0: {epoch_20_stream_cost:?}"); - eprintln!("confirmed stream execution in 2.05: {epoch_205_stream_cost:?}"); - - // stuff happened - assert!(epoch_20_stream_cost.runtime > 0); - assert!(epoch_205_stream_cost.runtime > 0); - - // more stuff happened in epoch 2.05 - assert!(epoch_205_stream_cost.read_count > epoch_20_stream_cost.read_count); - assert!(epoch_205_stream_cost.read_length > epoch_20_stream_cost.read_length); - assert!(epoch_205_stream_cost.write_count > epoch_20_stream_cost.write_count); - assert!(epoch_205_stream_cost.write_length > epoch_20_stream_cost.write_length); - - // but epoch 2.05 was *cheaper* in terms of CPU - assert!(epoch_205_stream_cost.runtime < epoch_20_stream_cost.runtime); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index b287d2dec4b..83218866dd6 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -3,7 +3,7 @@ use std::{env, thread}; use ::core::str; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use clarity::vm::ClarityVersion; +use clarity::vm::{ClarityVersion, Value}; use stacks::burnchains::bitcoin::address::{ BitcoinAddress, LegacyBitcoinAddressType, SegwitBitcoinAddress, }; @@ -25,6 +25,7 @@ use stacks::chainstate::stacks::miner::{ use stacks::chainstate::stacks::StacksBlockHeader; use stacks::clarity_cli::vm_execute as execute; use stacks::config::{Config, InitialBalance}; +use stacks::core::test_util::make_contract_call; use stacks::core::{self, EpochList, BURNCHAIN_TX_SEARCH_WINDOW}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{ diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 3b3f8c19088..e9bdeb70222 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -2,13 +2,14 @@ use std::collections::HashMap; use std::{env, thread}; use clarity::vm::types::PrincipalData; -use clarity::vm::ClarityVersion; +use clarity::vm::{ClarityVersion, Value}; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; use stacks::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use stacks::core::test_util::{make_contract_call, make_stacks_transfer}; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 057669547a3..a003e8033f7 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -17,8 +17,10 @@ use std::collections::HashMap; use std::{env, thread}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use clarity::vm::Value; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::config::InitialBalance; +use stacks::core::test_util::make_contract_call; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks_common::util::sleep_ms; diff --git 
a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs
index ffe95720453..82b5c5f45e1 100644
--- a/testnet/stacks-node/src/tests/epoch_24.rs
+++ b/testnet/stacks-node/src/tests/epoch_24.rs
@@ -27,6 +27,7 @@ use stacks::chainstate::stacks::db::StacksChainState;
 use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload};
 use stacks::clarity_cli::vm_execute as execute;
 use stacks::config::InitialBalance;
+use stacks::core::test_util::{make_contract_call, to_addr};
 use stacks::core::{self, EpochList, StacksEpochId};
 use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG};
 use stacks_common::consts::STACKS_EPOCH_MAX;
@@ -42,7 +43,6 @@ use crate::tests::neon_integrations::{
     get_account, get_chain_info, get_pox_info, neon_integration_test_conf, next_block_and_wait,
     submit_tx, test_observer, wait_for_runloop,
 };
-use crate::tests::{make_contract_call, to_addr};
 use crate::{neon, BitcoinRegtestController, BurnchainController};

 #[cfg(test)]
diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs
deleted file mode 100644
index 3864d9c3507..00000000000
--- a/testnet/stacks-node/src/tests/epoch_25.rs
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright (C) 2024 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-use std::{env, thread};
-
-use clarity::vm::types::PrincipalData;
-use stacks::burnchains::{Burnchain, PoxConstants};
-use stacks::config::InitialBalance;
-use stacks::core::{self, EpochList, StacksEpochId};
-use stacks_common::consts::STACKS_EPOCH_MAX;
-use stacks_common::types::chainstate::StacksPrivateKey;
-
-use crate::tests::bitcoin_regtest::BitcoinCoreController;
-use crate::tests::nakamoto_integrations::wait_for;
-use crate::tests::neon_integrations::{
-    get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx,
-    test_observer, wait_for_runloop,
-};
-use crate::tests::{make_stacks_transfer_mblock_only, to_addr};
-use crate::{neon, BitcoinRegtestController, BurnchainController};
-
-#[test]
-#[ignore]
-fn microblocks_disabled() {
-    if env::var("BITCOIND_TEST") != Ok("1".into()) {
-        return;
-    }
-
-    let reward_cycle_len = 10;
-    let prepare_phase_len = 3;
-    let epoch_2_05 = 1;
-    let epoch_2_1 = 2;
-    let v1_unlock_height = epoch_2_1 + 1;
-    let epoch_2_2 = 3; // two blocks before next prepare phase.
- let epoch_2_3 = 4; - let epoch_2_4 = 5; - let pox_3_activation_height = epoch_2_4; - let epoch_2_5 = 210; - - let spender_1_bal = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let spender_2_bal = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - - let spender_1_sk = StacksPrivateKey::random(); - let spender_1_addr: PrincipalData = to_addr(&spender_1_sk).into(); - - let spender_2_sk = StacksPrivateKey::random(); - let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); - - let mut initial_balances = vec![]; - - initial_balances.push(InitialBalance { - address: spender_1_addr.clone(), - amount: spender_1_bal, - }); - - initial_balances.push(InitialBalance { - address: spender_2_addr.clone(), - amount: spender_2_bal, - }); - - let (mut conf, miner_account) = neon_integration_test_conf(); - - conf.node.mine_microblocks = true; - conf.burnchain.max_rbf = 1000000; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 1_000; - conf.node.wait_time_for_blocks = 2_000; - conf.miner.wait_for_block_download = false; - - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - - test_observer::spawn(); - test_observer::register_any(&mut conf); - conf.initial_balances.append(&mut initial_balances); - - let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; - epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; - epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; - epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; - epochs[StacksEpochId::Epoch21].end_height = epoch_2_2; - epochs[StacksEpochId::Epoch22].start_height = epoch_2_2; - epochs[StacksEpochId::Epoch22].end_height = epoch_2_3; - epochs[StacksEpochId::Epoch23].start_height = epoch_2_3; - epochs[StacksEpochId::Epoch23].end_height = epoch_2_4; - epochs[StacksEpochId::Epoch24].start_height = epoch_2_4; - epochs[StacksEpochId::Epoch24].end_height = epoch_2_5; - epochs[StacksEpochId::Epoch25].start_height = epoch_2_5; - epochs[StacksEpochId::Epoch25].end_height = STACKS_EPOCH_MAX; - epochs.truncate_after(StacksEpochId::Epoch25); - conf.burnchain.epochs = Some(epochs); - - let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); - - let pox_constants = PoxConstants::new( - reward_cycle_len, - prepare_phase_len, - 4 * prepare_phase_len / 5, - 5, - 15, - u64::MAX - 2, - u64::MAX - 1, - v1_unlock_height as u32, - epoch_2_2 as u32 + 1, - u32::MAX, - pox_3_activation_height as u32, - ); - burnchain_config.pox_constants = pox_constants; - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .map_err(|_e| ()) - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let runloop_burnchain = burnchain_config; - - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); - - // give the run loop some time to start up! 
-    wait_for_runloop(&blocks_processed);
-
-    // first block wakes up the run loop
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // first block will hold our VRF registration
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // second block will be the first mined Stacks block
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // push us to block 205
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // Ensure we start off with 0 microblocks
-    assert!(test_observer::get_microblocks().is_empty());
-
-    let tx = make_stacks_transfer_mblock_only(
-        &spender_1_sk,
-        0,
-        500,
-        conf.burnchain.chain_id,
-        &spender_2_addr,
-        500,
-    );
-    submit_tx(&http_origin, &tx);
-
-    // Wait for a microblock to be assembled
-    wait_for(60, || Ok(test_observer::get_microblocks().len() == 1))
-        .expect("Failed to wait for microblocks to be assembled");
-
-    // mine Bitcoin blocks up until just before epoch 2.5
-    wait_for(120, || {
-        let tip_info = get_chain_info(&conf);
-        if tip_info.burn_block_height >= epoch_2_5 - 2 {
-            return Ok(true);
-        }
-        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-        Ok(false)
-    })
-    .expect("Failed to wait until just before epoch 2.5");
-
-    // Verify that the microblock was processed
-    let account = get_account(&http_origin, &spender_1_addr);
-    assert_eq!(
-        u64::try_from(account.balance).unwrap(),
-        spender_1_bal - 1_000
-    );
-    assert_eq!(account.nonce, 1);
-
-    let old_tip_info = get_chain_info(&conf);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    wait_for(30, || {
-        let tip_info = get_chain_info(&conf);
-        Ok(tip_info.burn_block_height >= old_tip_info.burn_block_height + 3)
-    })
-    .expect("Failed to process block");
-
-    info!("Test passed processing 2.5");
-
-    // Submit another microblock only transaction
-    let tx = make_stacks_transfer_mblock_only(
-        &spender_1_sk,
-        1,
-        500,
-        conf.burnchain.chain_id,
-        &spender_2_addr,
-        500,
-    );
-    submit_tx(&http_origin, &tx);
-
-    // Wait for a microblock to be assembled, but expect none to be assembled
-    wait_for(30, || Ok(test_observer::get_microblocks().len() > 1))
-        .expect_err("Microblocks should not have been assembled");
-
-    // Mine a block to see if the microblock gets processed
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // second transaction should not have been processed!
-    let account = get_account(&http_origin, &spender_1_addr);
-    assert_eq!(
-        u64::try_from(account.balance).unwrap(),
-        spender_1_bal - 1_000
-    );
-    assert_eq!(account.nonce, 1);
-
-    let miner_nonce_before_microblock_assembly = get_account(&http_origin, &miner_account).nonce;
-
-    // Now, let's tell the miner to try to mine microblocks, but don't try to confirm them!
- info!("Setting STACKS_TEST_FORCE_MICROBLOCKS_POST_25"); - env::set_var("STACKS_TEST_FORCE_MICROBLOCKS_POST_25", "1"); - - // Wait for a second microblock to be assembled - wait_for(60, || Ok(test_observer::get_microblocks().len() == 2)) - .expect("Failed to wait for microblocks to be assembled"); - - // Mine a block to see if the microblock gets processed - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let miner_nonce_after_microblock_assembly = get_account(&http_origin, &miner_account).nonce; - - // second transaction should not have been processed -- even though we should have - // produced microblocks, they should not get accepted to the chain state - let account = get_account(&http_origin, &spender_1_addr); - assert_eq!( - u64::try_from(account.balance).unwrap(), - spender_1_bal - 1_000 - ); - assert_eq!(account.nonce, 1); - - info!( - "Microblocks assembled: {}", - test_observer::get_microblocks().len() - ); - - // and our miner should have gotten some blocks accepted - assert_eq!( - miner_nonce_after_microblock_assembly, miner_nonce_before_microblock_assembly + 1, - "Mined before started microblock assembly: {miner_nonce_before_microblock_assembly}, Mined after started microblock assembly: {miner_nonce_after_microblock_assembly}" - ); - - // Now, tell the miner to try to confirm microblocks as well. - // This should test that the block gets rejected by append block - info!("Setting STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25"); - env::set_var("STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25", "1"); - - // Wait for a third microblock to be assembled - wait_for(60, || Ok(test_observer::get_microblocks().len() == 3)) - .expect("Failed to wait for microblocks to be assembled"); - - // Mine a block to see if the microblock gets processed - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let miner_nonce_after_microblock_confirmation = get_account(&http_origin, &miner_account).nonce; - - // our miner should not have gotten any more blocks accepted - assert_eq!( - miner_nonce_after_microblock_confirmation, - miner_nonce_after_microblock_assembly + 1, - "Mined after started microblock confimration: {miner_nonce_after_microblock_confirmation}", - ); - - // second transaction should not have been processed -- even though we should have - // produced microblocks, they should not get accepted to the chain state - let account = get_account(&http_origin, &spender_1_addr); - assert_eq!( - u64::try_from(account.balance).unwrap(), - spender_1_bal - 1_000 - ); - assert_eq!(account.nonce, 1); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index a67d8ae2c89..0371a088c6b 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -26,6 +26,10 @@ use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; use stacks::config::InitialBalance; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; +use stacks::core::test_util::{ + make_contract_call, make_contract_publish, make_sponsored_stacks_transfer_on_testnet, + make_stacks_transfer, to_addr, +}; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, @@ -37,12 +41,8 @@ use stacks::net::api::getistraitimplemented::GetIsTraitImplementedResponse; use stacks_common::types::chainstate::{StacksAddress, 
StacksBlockId, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum}; -use super::{ - make_contract_call, make_contract_publish, make_stacks_transfer, to_addr, ADDR_4, SK_1, SK_2, - SK_3, -}; +use super::{ADDR_4, SK_1, SK_2, SK_3}; use crate::helium::RunLoop; -use crate::tests::make_sponsored_stacks_transfer_on_testnet; const OTHER_CONTRACT: &str = " (define-data-var x uint u0) diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index fa831815293..4268daab08d 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -15,6 +15,10 @@ use stacks::chainstate::stacks::{ }; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; +use stacks::core::test_util::{ + make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, + sign_standard_single_sig_tx_anchor_mode_version, to_addr, +}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; @@ -24,10 +28,7 @@ use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress}; use stacks_common::util::hash::*; use stacks_common::util::secp256k1::*; -use super::{ - make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, - serialize_sign_standard_single_sig_tx_anchor_mode_version, to_addr, SK_1, SK_2, -}; +use super::{SK_1, SK_2}; use crate::helium::RunLoop; use crate::Keychain; @@ -506,7 +507,7 @@ fn mempool_setup_chainstate() { 1000, TokenTransferMemo([0; 34]), ); - let tx_bytes = serialize_sign_standard_single_sig_tx_anchor_mode_version( + let tx = sign_standard_single_sig_tx_anchor_mode_version( payload, &contract_sk, 5, @@ -515,8 +516,8 @@ fn mempool_setup_chainstate() { TransactionAnchorMode::OnChainOnly, TransactionVersion::Mainnet, ); - let tx = - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); let e = chain_state .will_admit_mempool_tx( &NULL_BURN_STATE_DB, diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a4546d231b7..659f82aaea9 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -18,29 +18,21 @@ use std::sync::atomic::AtomicU64; use std::sync::{Arc, Mutex}; use clarity::vm::costs::ExecutionCost; -use clarity::vm::database::BurnStateDB; use clarity::vm::events::STXEventType; -use clarity::vm::types::PrincipalData; -use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use lazy_static::lazy_static; use neon_integrations::test_observer::EVENT_OBSERVER_PORT; use rand::Rng; use stacks::chainstate::burn::ConsensusHash; -use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StacksTransactionEvent; -use stacks::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; use stacks::chainstate::stacks::{ - CoinbasePayload, StacksBlock, StacksMicroblock, StacksMicroblockHeader, StacksPrivateKey, - StacksPublicKey, StacksTransaction, StacksTransactionSigner, TokenTransferMemo, - TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, - TransactionPostConditionMode, TransactionSmartContract, TransactionSpendingCondition, - TransactionVersion, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + StacksPrivateKey, StacksPublicKey, StacksTransaction, TransactionPayload, }; 
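The mempool.rs hunk above switches callers from serialize_sign_standard_single_sig_tx_anchor_mode_version, which returned wire bytes, to sign_standard_single_sig_tx_anchor_mode_version, which returns a StacksTransaction that the caller serializes explicitly. A minimal sketch of the new pattern, assuming the import paths and argument order shown in this diff (the nonce and fee values here are illustrative, not taken from any call site):

    use stacks::chainstate::stacks::{
        StacksPrivateKey, StacksTransaction, TokenTransferMemo, TransactionAnchorMode,
        TransactionPayload, TransactionVersion,
    };
    use stacks::codec::StacksMessageCodec;
    use stacks::core::test_util::{sign_standard_single_sig_tx_anchor_mode_version, to_addr};
    use stacks::core::CHAIN_ID_TESTNET;

    /// Build a signed token transfer and serialize it to wire bytes,
    /// mirroring the updated mempool_setup_chainstate() call site above.
    fn transfer_tx_bytes(sender: &StacksPrivateKey, recipient: &StacksPrivateKey) -> Vec<u8> {
        let payload = TransactionPayload::TokenTransfer(
            to_addr(recipient).into(),
            1000,
            TokenTransferMemo([0; 34]),
        );
        // The helper now returns a StacksTransaction, not pre-serialized bytes.
        let tx: StacksTransaction = sign_standard_single_sig_tx_anchor_mode_version(
            payload,
            sender,
            0,   // nonce (illustrative)
            200, // tx fee (illustrative)
            CHAIN_ID_TESTNET,
            TransactionAnchorMode::OnChainOnly,
            TransactionVersion::Testnet,
        );
        // Serialization is an explicit, separate step for callers that need bytes.
        let mut tx_bytes = vec![];
        tx.consensus_serialize(&mut tx_bytes).unwrap();
        tx_bytes
    }

Keeping the transaction value around (instead of only its bytes) is what lets call sites like will_admit_mempool_tx above drop the round-trip through consensus_deserialize.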
+#[cfg(any(test, feature = "testing"))]
+use stacks::core::test_util::{make_contract_publish, to_addr};
 use stacks::core::{StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_TESTNET};
-use stacks::util_lib::strings::StacksString;
 use stacks_common::address::AddressHashMode;
 use stacks_common::codec::StacksMessageCodec;
-use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress};
+use stacks_common::types::chainstate::BlockHeaderHash;
 use stacks_common::util::get_epoch_time_secs;
 use stacks_common::util::hash::{hex_bytes, to_hex};
@@ -58,7 +50,6 @@
 mod epoch_21;
 mod epoch_22;
 mod epoch_23;
 mod epoch_24;
-mod epoch_25;
 mod integrations;
 mod mempool;
 pub mod nakamoto_integrations;
@@ -133,222 +124,6 @@ pub fn insert_new_port(port: u16) -> bool {
     ports.insert(port)
 }
-
-#[allow(clippy::too_many_arguments)]
-pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version(
-    payload: TransactionPayload,
-    sender: &StacksPrivateKey,
-    payer: &StacksPrivateKey,
-    sender_nonce: u64,
-    payer_nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    anchor_mode: TransactionAnchorMode,
-    version: TransactionVersion,
-) -> Vec<u8> {
-    serialize_sign_tx_anchor_mode_version(
-        payload,
-        sender,
-        Some(payer),
-        sender_nonce,
-        Some(payer_nonce),
-        tx_fee,
-        chain_id,
-        anchor_mode,
-        version,
-    )
-}
-
-pub fn serialize_sign_standard_single_sig_tx(
-    payload: TransactionPayload,
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-) -> Vec<u8> {
-    serialize_sign_standard_single_sig_tx_anchor_mode(
-        payload,
-        sender,
-        nonce,
-        tx_fee,
-        chain_id,
-        TransactionAnchorMode::OnChainOnly,
-    )
-}
-
-pub fn serialize_sign_standard_single_sig_tx_anchor_mode(
-    payload: TransactionPayload,
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    anchor_mode: TransactionAnchorMode,
-) -> Vec<u8> {
-    serialize_sign_standard_single_sig_tx_anchor_mode_version(
-        payload,
-        sender,
-        nonce,
-        tx_fee,
-        chain_id,
-        anchor_mode,
-        TransactionVersion::Testnet,
-    )
-}
-
-pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version(
-    payload: TransactionPayload,
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    anchor_mode: TransactionAnchorMode,
-    version: TransactionVersion,
-) -> Vec<u8> {
-    serialize_sign_tx_anchor_mode_version(
-        payload,
-        sender,
-        None,
-        nonce,
-        None,
-        tx_fee,
-        chain_id,
-        anchor_mode,
-        version,
-    )
-}
-
-#[allow(clippy::too_many_arguments)]
-pub fn serialize_sign_tx_anchor_mode_version(
-    payload: TransactionPayload,
-    sender: &StacksPrivateKey,
-    payer: Option<&StacksPrivateKey>,
-    sender_nonce: u64,
-    payer_nonce: Option<u64>,
-    tx_fee: u64,
-    chain_id: u32,
-    anchor_mode: TransactionAnchorMode,
-    version: TransactionVersion,
-) -> Vec<u8> {
-    let mut sender_spending_condition =
-        TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender))
-            .expect("Failed to create p2pkh spending condition from public key.");
-    sender_spending_condition.set_nonce(sender_nonce);
-
-    let auth = match (payer, payer_nonce) {
-        (Some(payer), Some(payer_nonce)) => {
-            let mut payer_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh(
-                StacksPublicKey::from_private(payer),
-            )
-            .expect("Failed to create p2pkh spending condition from public key.");
-            payer_spending_condition.set_nonce(payer_nonce);
-            payer_spending_condition.set_tx_fee(tx_fee);
-            TransactionAuth::Sponsored(sender_spending_condition, payer_spending_condition)
-        }
-        _ => {
-            sender_spending_condition.set_tx_fee(tx_fee);
            TransactionAuth::Standard(sender_spending_condition)
-        }
-    };
-    let mut unsigned_tx = StacksTransaction::new(version, auth, payload);
-    unsigned_tx.anchor_mode = anchor_mode;
-    unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow;
-    unsigned_tx.chain_id = chain_id;
-
-    let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx);
-    tx_signer.sign_origin(sender).unwrap();
-    if let (Some(payer), Some(_)) = (payer, payer_nonce) {
-        tx_signer.sign_sponsor(payer).unwrap();
-    }
-
-    let mut buf = vec![];
-    tx_signer
-        .get_tx()
-        .unwrap()
-        .consensus_serialize(&mut buf)
-        .unwrap();
-    buf
-}
-
-pub fn make_contract_publish_versioned(
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    contract_name: &str,
-    contract_content: &str,
-    version: Option<ClarityVersion>,
-) -> Vec<u8> {
-    let name = ContractName::from(contract_name);
-    let code_body = StacksString::from_string(&contract_content.to_string()).unwrap();
-
-    let payload =
-        TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version);
-
-    serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id)
-}
-
-pub fn make_contract_publish(
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    contract_name: &str,
-    contract_content: &str,
-) -> Vec<u8> {
-    make_contract_publish_versioned(
-        sender,
-        nonce,
-        tx_fee,
-        chain_id,
-        contract_name,
-        contract_content,
-        None,
-    )
-}
-
-pub fn make_contract_publish_microblock_only_versioned(
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    contract_name: &str,
-    contract_content: &str,
-    version: Option<ClarityVersion>,
-) -> Vec<u8> {
-    let name = ContractName::from(contract_name);
-    let code_body = StacksString::from_string(&contract_content.to_string()).unwrap();
-
-    let payload =
-        TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version);
-
-    serialize_sign_standard_single_sig_tx_anchor_mode(
-        payload,
-        sender,
-        nonce,
-        tx_fee,
-        chain_id,
-        TransactionAnchorMode::OffChainOnly,
-    )
-}
-
-pub fn make_contract_publish_microblock_only(
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    contract_name: &str,
-    contract_content: &str,
-) -> Vec<u8> {
-    make_contract_publish_microblock_only_versioned(
-        sender,
-        nonce,
-        tx_fee,
-        chain_id,
-        contract_name,
-        contract_content,
-        None,
-    )
-}
-
 pub fn new_test_conf() -> Config {
     // secretKey: "b1cf9cee5083f421c84d7cb53be5edf2801c3c78d63d53917aee0bdc8bd160ee01",
     // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81",
@@ -409,183 +184,6 @@ pub fn set_random_binds(config: &mut Config) {
     config.node.p2p_address = format!("{localhost}:{p2p_port}");
 }
-
-pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress {
-    StacksAddress::from_public_keys(
-        C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
-        &AddressHashMode::SerializeP2PKH,
-        1,
-        &vec![StacksPublicKey::from_private(sk)],
-    )
-    .unwrap()
-}
-
-pub fn make_stacks_transfer(
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    recipient: &PrincipalData,
-    amount: u64,
-) -> Vec<u8> {
-    let payload =
-        TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34]));
-    serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id)
-}
-
-#[allow(clippy::too_many_arguments)]
-pub fn make_sponsored_stacks_transfer_on_testnet(
-    sender: &StacksPrivateKey,
-    payer: &StacksPrivateKey,
-    sender_nonce: u64,
-    payer_nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    recipient: &PrincipalData,
-    amount: u64,
-) -> Vec<u8> {
-    let payload =
-        TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34]));
-    serialize_sign_sponsored_sig_tx_anchor_mode_version(
-        payload,
-        sender,
-        payer,
-        sender_nonce,
-        payer_nonce,
-        tx_fee,
-        chain_id,
-        TransactionAnchorMode::OnChainOnly,
-        TransactionVersion::Testnet,
-    )
-}
-
-pub fn make_stacks_transfer_mblock_only(
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    recipient: &PrincipalData,
-    amount: u64,
-) -> Vec<u8> {
-    let payload =
-        TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34]));
-    serialize_sign_standard_single_sig_tx_anchor_mode(
-        payload,
-        sender,
-        nonce,
-        tx_fee,
-        chain_id,
-        TransactionAnchorMode::OffChainOnly,
-    )
-}
-
-pub fn make_poison(
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    header_1: StacksMicroblockHeader,
-    header_2: StacksMicroblockHeader,
-) -> Vec<u8> {
-    let payload = TransactionPayload::PoisonMicroblock(header_1, header_2);
-    serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id)
-}
-
-pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec<u8> {
-    let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None);
-    serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id)
-}
-
-#[allow(clippy::too_many_arguments)]
-pub fn make_contract_call(
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    contract_addr: &StacksAddress,
-    contract_name: &str,
-    function_name: &str,
-    function_args: &[Value],
-) -> Vec<u8> {
-    let contract_name = ContractName::from(contract_name);
-    let function_name = ClarityName::from(function_name);
-
-    let payload = TransactionContractCall {
-        address: *contract_addr,
-        contract_name,
-        function_name,
-        function_args: function_args.to_vec(),
-    };
-
-    serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id)
-}
-
-#[allow(clippy::too_many_arguments)]
-pub fn make_contract_call_mblock_only(
-    sender: &StacksPrivateKey,
-    nonce: u64,
-    tx_fee: u64,
-    chain_id: u32,
-    contract_addr: &StacksAddress,
-    contract_name: &str,
-    function_name: &str,
-    function_args: &[Value],
-) -> Vec<u8> {
-    let contract_name = ContractName::from(contract_name);
-    let function_name = ClarityName::from(function_name);
-
-    let payload = TransactionContractCall {
-        address: *contract_addr,
-        contract_name,
-        function_name,
-        function_args: function_args.to_vec(),
-    };
-
-    serialize_sign_standard_single_sig_tx_anchor_mode(
-        payload.into(),
-        sender,
-        nonce,
-        tx_fee,
-        chain_id,
-        TransactionAnchorMode::OffChainOnly,
-    )
-}
-
-fn make_microblock(
-    privk: &StacksPrivateKey,
-    chainstate: &mut StacksChainState,
-    burn_dbconn: &dyn BurnStateDB,
-    consensus_hash: ConsensusHash,
-    block: StacksBlock,
-    txs: Vec<StacksTransaction>,
-) -> StacksMicroblock {
-    let mut block_bytes = vec![];
-    block.consensus_serialize(&mut block_bytes).unwrap();
-
-    let mut microblock_builder = StacksMicroblockBuilder::new(
-        block.block_hash(),
-        consensus_hash,
-        chainstate,
-        burn_dbconn,
-        BlockBuilderSettings::max_value(),
-    )
-    .unwrap();
-    let mempool_txs: Vec<_> = txs
-        .into_iter()
-        .map(|tx| {
-            // TODO: better fee estimation
-            let mut tx_bytes = vec![];
-            tx.consensus_serialize(&mut tx_bytes).unwrap();
-            (tx, tx_bytes.len() as u64)
-        })
-        .collect();
-
-    // NOTE: we intentionally do not check the block's microblock pubkey hash against the private
-    // key, because we
may need to test that microblocks get rejected due to bad signatures. - microblock_builder - .mine_next_microblock_from_txs(mempool_txs, privk) - .unwrap() -} - /// Deserializes the `StacksTransaction` objects from `blocks` and returns all those that /// match `test_fn`. pub fn select_transactions_where( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f1117be811e..97e4c22a8d0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -31,7 +31,8 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::{RejectReason, SignerMessage as SignerMessageV0}; use libsigner::{SignerSession, StackerDBSession}; -use rusqlite::OptionalExtension; +use rand::{thread_rng, Rng}; +use rusqlite::{Connection, OptionalExtension}; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ @@ -61,7 +62,10 @@ use stacks::chainstate::stacks::{ TransactionVersion, MAX_BLOCK_LEN, }; use stacks::config::{EventKeyType, InitialBalance}; -use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; +use stacks::core::mempool::{MemPoolWalkStrategy, MAXIMUM_MEMPOOL_TX_CHAINING}; +use stacks::core::test_util::{ + insert_tx_in_mempool, make_contract_call, make_contract_publish_versioned, make_stacks_transfer, +}; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -113,10 +117,7 @@ use crate::tests::neon_integrations::{ run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, wait_for_runloop, }; use crate::tests::signer::SignerTest; -use crate::tests::{ - gen_random_port, get_chain_info, make_contract_call, make_contract_publish, - make_contract_publish_versioned, make_stacks_transfer, to_addr, -}; +use crate::tests::{gen_random_port, get_chain_info, make_contract_publish, to_addr}; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; @@ -403,15 +404,15 @@ pub fn blind_signer_multinode( match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { Ok(signed_block) => { if signed_blocks.contains(&signed_block) { - info!("Already signed block, will sleep and try again"; "signer_sig_hash" => signed_block.to_hex()); + info!("Already signed block, will sleep and try again"; "signer_signature_hash" => signed_block.to_hex()); thread::sleep(Duration::from_secs(5)); match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { Ok(signed_block) => { if signed_blocks.contains(&signed_block) { - info!("Already signed block, ignoring"; "signer_sig_hash" => signed_block.to_hex()); + info!("Already signed block, ignoring"; "signer_signature_hash" => signed_block.to_hex()); continue; } - info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + info!("Signed block"; "signer_signature_hash" => signed_block.to_hex()); signed_blocks.insert(signed_block); } Err(e) => { @@ -420,7 +421,7 @@ pub fn blind_signer_multinode( }; continue; } - info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + info!("Signed block"; "signer_signature_hash" => signed_block.to_hex()); signed_blocks.insert(signed_block); } Err(e) => { @@ -482,7 +483,11 
@@ pub fn get_latest_block_proposal( }); for (b, _, is_latest) in proposed_blocks.iter() { - info!("Consider block"; "signer_sighash" => %b.header.signer_signature_hash(), "is_latest_sortition" => is_latest, "chain_height" => b.header.chain_length); + info!("Consider block"; + "signer_signature_hash" => %b.header.signer_signature_hash(), + "is_latest_sortition" => is_latest, + "chain_height" => b.header.chain_length + ); } let Some((proposed_block, miner_addr, _)) = proposed_blocks.pop() else { @@ -540,20 +545,20 @@ pub fn read_and_sign_block_proposal( }) .collect(); let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); - let signer_sig_hash = proposed_block.header.signer_signature_hash(); + let signer_signature_hash = proposed_block.header.signer_signature_hash(); let other_views = other_views_result?; if !other_views.is_empty() { info!( "Fetched block proposals"; - "primary_latest_signer_sighash" => %signer_sig_hash, + "signer_signature_hash" => %signer_signature_hash, "primary_latest_block_height" => proposed_block.header.chain_length, "other_views" => ?other_views, ); } - if signed_blocks.contains(&signer_sig_hash) { + if signed_blocks.contains(&signer_signature_hash) { // already signed off on this block, don't sign again. - return Ok(signer_sig_hash); + return Ok(signer_signature_hash); } let reward_set = load_nakamoto_reward_set( @@ -576,7 +581,7 @@ pub fn read_and_sign_block_proposal( info!( "Fetched proposed block from .miners StackerDB"; "proposed_block_hash" => &proposed_block_hash, - "signer_sig_hash" => &signer_sig_hash.to_hex(), + "signer_signature_hash" => &signer_signature_hash.to_hex(), ); signers.sign_block_with_reward_set(&mut proposed_block, &reward_set); @@ -584,7 +589,7 @@ pub fn read_and_sign_block_proposal( channel .send(proposed_block.header.signer_signature) .unwrap(); - Ok(signer_sig_hash) + Ok(signer_signature_hash) } /// Return a working nakamoto-neon config and the miner's bitcoin address to fund @@ -887,7 +892,7 @@ pub fn boot_to_epoch_3( let signer_pk = StacksPublicKey::from_private(signer_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -950,7 +955,7 @@ pub fn boot_to_epoch_3( let signer_index = get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) .unwrap(); - let voting_tx = tests::make_contract_call( + let voting_tx = make_contract_call( signer_sk, 0, 300, @@ -1049,7 +1054,7 @@ pub fn boot_to_pre_epoch_3_boundary( let signer_pk = StacksPublicKey::from_private(signer_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -1112,7 +1117,7 @@ pub fn boot_to_pre_epoch_3_boundary( let signer_index = get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) .unwrap(); - let voting_tx = tests::make_contract_call( + let voting_tx = make_contract_call( signer_sk, 0, 300, @@ -1287,7 +1292,7 @@ pub fn setup_epoch_3_reward_set( .to_rsv(); let signer_pk = StacksPublicKey::from_private(signer_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -2731,7 +2736,7 @@ fn correct_burn_outs() { .unwrap() .to_rsv(); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( account.0, account.2.nonce, 1000, @@ -3079,6 +3084,7 @@ fn block_proposal_api_endpoint() { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ); assert!( matches!(res, TransactionResult::Success(..)), @@ 
-4101,8 +4107,31 @@ fn follower_bootup_across_multiple_cycles() {
     debug!("Booted follower-thread");

-    // Wait a long time for the follower to catch up because CI is slow.
-    wait_for(600, || {
+    // Wait some time for the follower to at least get some nakamoto blocks
+    wait_for(120, || {
+        thread::sleep(Duration::from_secs(5));
+        let Ok(follower_node_info) = get_chain_info_result(&follower_conf) else {
+            return Ok(false);
+        };
+
+        let block_id = StacksBlockId::new(
+            &follower_node_info.stacks_tip_consensus_hash,
+            &follower_node_info.stacks_tip,
+        );
+        let tip = NakamotoChainState::get_block_header(chainstate.db(), &block_id)
+            .unwrap()
+            .unwrap();
+        info!(
+            "Latest follower tip";
+            "height" => tip.stacks_block_height,
+            "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(),
+        );
+
+        Ok(tip.anchored_header.as_stacks_nakamoto().is_some())
+    })
+    .unwrap();
+
+    wait_for(480, || {
         sleep_ms(1000);
         let Ok(follower_node_info) = get_chain_info_result(&follower_conf) else {
             return Ok(false);
@@ -4675,7 +4704,7 @@ fn burn_ops_integration_test() {
     let signer_key_arg_1: StacksPublicKeyBuffer =
         signer_pk_1.to_bytes_compressed().as_slice().into();

-    let set_signer_key_auth_tx = tests::make_contract_call(
+    let set_signer_key_auth_tx = make_contract_call(
         &signer_sk_1,
         1,
         500,
@@ -6280,7 +6309,7 @@ fn clarity_burn_state() {
     // Pause mining to prevent the stacks block from being mined before the tenure change is processed
     TEST_MINE_STALL.set(true);
     // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment)
-    let call_tx = tests::make_contract_call(
+    let call_tx = make_contract_call(
         &sender_sk,
         sender_nonce,
         tx_fee,
@@ -6369,7 +6398,7 @@ fn clarity_burn_state() {
     result.expect_result_ok().expect("Read-only call failed");

     // Submit a tx to trigger the next block
-    let call_tx = tests::make_contract_call(
+    let call_tx = make_contract_call(
         &sender_sk,
         sender_nonce,
         tx_fee,
@@ -6642,6 +6671,7 @@ fn signer_chainstate() {
             ext: ExtraBlockInfo::None,
             state: BlockState::Unprocessed,
             validation_time_ms: None,
+            reject_reason: None,
         })
         .unwrap();
@@ -6722,6 +6752,7 @@ fn signer_chainstate() {
             ext: ExtraBlockInfo::None,
             state: BlockState::GloballyAccepted,
             validation_time_ms: Some(1000),
+            reject_reason: None,
         })
         .unwrap();
@@ -9645,12 +9676,14 @@ fn nakamoto_lockup_events() {
         wait_for(30, || Ok(get_stacks_height() > height_before)).unwrap();
     }

+    wait_for(30, || {
+        let blocks = test_observer::get_blocks();
+        let block = blocks.last().unwrap();
+        Ok(block.get("block_height").unwrap().as_u64().unwrap() == unlock_height)
+    })
+    .expect("Timed out waiting for test observer to reach unlock height");
     let blocks = test_observer::get_blocks();
     let block = blocks.last().unwrap();
-    assert_eq!(
-        block.get("block_height").unwrap().as_u64().unwrap(),
-        unlock_height
-    );

     let events = block.get("events").unwrap().as_array().unwrap();
     let mut found_event = false;
@@ -11111,6 +11144,8 @@ fn reload_miner_config() {
     // setup sender + recipient for some test stx transfers
     // these are necessary for the interim blocks to get mined at all
     let sender_addr = tests::to_addr(&sender_sk);
+    let old_burn_fee_cap: u64 = 100000;
+    conf.burnchain.burn_fee_cap = old_burn_fee_cap;
     conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1000000);
     conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000);

@@ -11145,8 +11180,6 @@ fn reload_miner_config() {
         file.write_all(new_config.as_bytes()).unwrap();
     };

-    update_config(100000, 50);
-
     let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap();
     let run_loop_stopper = run_loop.get_termination_switch();
     let counters = run_loop.counters();
@@ -11176,6 +11209,8 @@ fn reload_miner_config() {

     wait_for_first_naka_block_commit(60, &commits_submitted);

+    next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();
+    next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();
     next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();

     let burn_blocks = test_observer::get_burn_blocks();
@@ -11191,7 +11226,9 @@ fn reload_miner_config() {
         .map(|r| r.get("amt").unwrap().as_u64().unwrap())
         .sum::<u64>();

-    assert_eq!(reward_amount, 200000);
+    let burn_amount = burn_block.get("burn_amount").unwrap().as_u64().unwrap();
+
+    assert_eq!(reward_amount + burn_amount, old_burn_fee_cap);

     next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();

@@ -11217,7 +11254,794 @@
         .map(|r| r.get("amt").unwrap().as_u64().unwrap())
         .sum::<u64>();

-    assert_eq!(reward_amount, new_amount);
+    let burn_amount = burn_block.get("burn_amount").unwrap().as_u64().unwrap();
+
+    assert_eq!(reward_amount + burn_amount, new_amount);
+
+    coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper.store(false, Ordering::SeqCst);
+
+    run_loop_thread.join().unwrap();
+}
+
+/// Test that a new block commit is issued when the miner spend or config changes.
+///
+/// The test boots into Nakamoto. Then, it waits for a block commit on the most recent
+/// tip. The config is updated, and then the test ensures that a new commit was submitted after that
+/// config change.
+#[test]
+#[ignore]
+fn rbf_on_config_change() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let (mut conf, _miner_account) = naka_neon_integration_conf(None);
+    let password = "12345".to_string();
+    let _http_origin = format!("http://{}", &conf.node.rpc_bind);
+    conf.connection_options.auth_token = Some(password.clone());
+    conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+    conf.node.next_initiative_delay = 500;
+    let stacker_sk = setup_stacker(&mut conf);
+    let signer_sk = Secp256k1PrivateKey::random();
+    let signer_addr = tests::to_addr(&signer_sk);
+    let sender_sk = Secp256k1PrivateKey::random();
+    let recipient_sk = Secp256k1PrivateKey::random();
+    let _recipient_addr = tests::to_addr(&recipient_sk);
+    // setup sender + recipient for some test stx transfers
+    // these are necessary for the interim blocks to get mined at all
+    let sender_addr = tests::to_addr(&sender_sk);
+    let old_burn_fee_cap: u64 = 100000;
+    conf.burnchain.burn_fee_cap = old_burn_fee_cap;
+    conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1000000);
+    conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000);
+
+    test_observer::spawn();
+    test_observer::register(&mut conf, &[EventKeyType::AnyEvent]);
+
+    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .expect("Failed starting bitcoind");
+    let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+    btc_regtest_controller.bootstrap_chain(201);
+
+    let conf_path =
+        std::env::temp_dir().join(format!("miner-config-test-{}.toml", rand::random::<u64>()));
+    conf.config_path = Some(conf_path.clone().to_str().unwrap().to_string());
+
+    // Make a minimum-viable config file
+    let update_config = |burn_fee_cap: u64,
+
+/// Test that a new block commit is issued when the miner's spend amount or config changes.
+///
+/// The test boots into Nakamoto. Then, it waits for a block commit on the most recent
+/// tip. The config is updated, and the test then ensures that a new commit is submitted
+/// after that config change.
+#[test]
+#[ignore]
+fn rbf_on_config_change() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let (mut conf, _miner_account) = naka_neon_integration_conf(None);
+    let password = "12345".to_string();
+    let _http_origin = format!("http://{}", &conf.node.rpc_bind);
+    conf.connection_options.auth_token = Some(password.clone());
+    conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+    conf.node.next_initiative_delay = 500;
+    let stacker_sk = setup_stacker(&mut conf);
+    let signer_sk = Secp256k1PrivateKey::random();
+    let signer_addr = tests::to_addr(&signer_sk);
+    let sender_sk = Secp256k1PrivateKey::random();
+    let recipient_sk = Secp256k1PrivateKey::random();
+    let _recipient_addr = tests::to_addr(&recipient_sk);
+    // setup sender + recipient for some test stx transfers
+    // these are necessary for the interim blocks to get mined at all
+    let sender_addr = tests::to_addr(&sender_sk);
+    let old_burn_fee_cap: u64 = 100000;
+    conf.burnchain.burn_fee_cap = old_burn_fee_cap;
+    conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1000000);
+    conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000);
+
+    test_observer::spawn();
+    test_observer::register(&mut conf, &[EventKeyType::AnyEvent]);
+
+    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .expect("Failed starting bitcoind");
+    let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+    btc_regtest_controller.bootstrap_chain(201);
+
+    let conf_path =
+        std::env::temp_dir().join(format!("miner-config-test-{}.toml", rand::random::<u64>()));
+    conf.config_path = Some(conf_path.clone().to_str().unwrap().to_string());
+
+    // Make a minimum-viable config file
+    let update_config = |burn_fee_cap: u64, sats_vbyte: u64| {
+        use std::io::Write;
+
+        let new_config = format!(
+            r#"
+            [burnchain]
+            burn_fee_cap = {}
+            satoshis_per_byte = {}
+            "#,
+            burn_fee_cap, sats_vbyte,
+        );
+        // Write to a file
+        let mut file = File::create(&conf_path).unwrap();
+        file.write_all(new_config.as_bytes()).unwrap();
+    };
+
+    let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap();
+    let run_loop_stopper = run_loop.get_termination_switch();
+    let counters = run_loop.counters();
+    let Counters {
+        blocks_processed,
+        naka_submitted_commits: commits_submitted,
+        ..
+    } = run_loop.counters();
+
+    let coord_channel = run_loop.coordinator_channels();
+
+    let run_loop_thread = thread::spawn(move || run_loop.start(None, 0));
+    let mut signers = TestSigners::new(vec![signer_sk]);
+    wait_for_runloop(&blocks_processed);
+    boot_to_epoch_3(
+        &conf,
+        &blocks_processed,
+        &[stacker_sk],
+        &[signer_sk],
+        &mut Some(&mut signers),
+        &mut btc_regtest_controller,
+    );
+
+    info!("------------------------- Reached Epoch 3.0 -------------------------");
+
+    blind_signer(&conf, &signers, &counters);
+
+    wait_for_first_naka_block_commit(60, &commits_submitted);
+
+    next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();
+
+    let burnchain = conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    let stacks_height = tip.stacks_block_height;
+
+    let mut last_log = Instant::now();
+    last_log -= Duration::from_secs(5);
+    wait_for(30, || {
+        let last_commit = &counters.naka_submitted_commit_last_stacks_tip.get();
+        if last_log.elapsed() >= Duration::from_secs(5) {
+            info!(
+                "---- last_commit: {:?} stacks_height: {:?} ---- ",
+                last_commit, stacks_height
+            );
+            last_log = Instant::now();
+        }
+        Ok(*last_commit >= stacks_height)
+    })
+    .expect("Failed to wait for last commit");
+
+    let commits_before = counters.naka_submitted_commits.get();
+
+    let commit_amount_before = counters.naka_submitted_commit_last_commit_amount.get();
+
+    info!("---- Updating config ----");
+
+    update_config(155000, 57);
+
+    wait_for(30, || {
+        let commit_count = &counters.naka_submitted_commits.get();
+        Ok(*commit_count > commits_before)
+    })
+    .expect("Expected new commit after config change");
+
+    let commit_amount_after = counters.naka_submitted_commit_last_commit_amount.get();
+    assert_eq!(commit_amount_after, 155000);
+    assert_ne!(commit_amount_after, commit_amount_before);
+
+    coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper.store(false, Ordering::SeqCst);
+
+    run_loop_thread.join().unwrap();
+}
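`large_mempool_base` below fills the mempool by writing rows straight into the node's sqlite mempool DB via `insert_tx_in_mempool`, committing one large transaction per funding round instead of posting thousands of transactions over RPC. A sketch of that batching pattern with `rusqlite` (the `txs` table and its columns here are hypothetical stand-ins, not the node's real mempool schema):

```rust
use rusqlite::{params, Connection, Result};

/// Batch-insert many rows inside a single sqlite transaction.
/// One commit per batch amortizes the fsync cost, which is the same reason
/// the test below commits `db_tx` only after queuing every transfer.
fn bulk_insert(conn: &mut Connection, rows: &[(String, u64)]) -> Result<()> {
    let db_tx = conn.transaction()?;
    for (origin, fee) in rows {
        // Illustrative schema: the real mempool table has many more columns.
        db_tx.execute(
            "INSERT INTO txs (origin, fee) VALUES (?1, ?2)",
            params![origin, fee],
        )?;
    }
    // Nothing becomes a durable commit until here.
    db_tx.commit()
}
```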
+
+/// This function checks the timing of the mempool iteration when there
+/// is a large number of transactions in the mempool. It boots to epoch 3,
+/// fans out some STX transfers to a large number of accounts, waits for these to
+/// all be mined, then pauses block mining and submits a large number of
+/// transactions to the mempool. It then unpauses block mining and checks
+/// how long it takes for the miner to mine the first block, and how long it
+/// takes to empty the mempool. Several tests below call this function, testing
+/// different strategies and fees.
+fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
+    naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+    naka_conf.miner.mempool_walk_strategy = strategy;
+
+    let sender_signer_sk = Secp256k1PrivateKey::random();
+    let sender_signer_addr = tests::to_addr(&sender_signer_sk);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
+    let stacker_sk = setup_stacker(&mut naka_conf);
+    let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
+
+    let transfer_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // Start with 10 accounts with initial balances.
+    let initial_sender_sks = (0..10)
+        .map(|_| StacksPrivateKey::random())
+        .collect::<Vec<_>>();
+    let initial_sender_addrs = initial_sender_sks
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect::<Vec<_>>();
+
+    // These 10 accounts will send to 25 accounts each, then those 260 accounts
+    // will send to 25 accounts each, for a total of 6760 accounts.
+    // At the end of the funding round, we want to have 6760 accounts with
+    // enough balance to send 1 uSTX 25 times.
+    // With a fee of 180 to 2000 uSTX per send, we need each account to have
+    // 2001 * 25 = 50_025 uSTX.
+    // The 260 accounts in the middle will need to have enough to send that
+    // amount to 25 other accounts, plus the fee, and then enough to send the
+    // transfers themselves as well:
+    // (50025 + 180) * 25 + 50025 = 1_305_150 uSTX.
+    // The 10 initial accounts will need to have enough to send that amount to
+    // 25 other accounts, plus enough to send the transfers themselves as well:
+    // (1305150 + 180) * 25 + 1305150 = 33_938_400 uSTX.
+    let initial_balance = 33_938_400;
+    for addr in initial_sender_addrs.iter() {
+        naka_conf.add_initial_balance(PrincipalData::from(*addr).to_string(), initial_balance);
+    }
+    // This will hold tuples for all of our senders, with the sender pk and
+    // the nonce
+    let mut senders = initial_sender_sks
+        .iter()
+        .map(|sk| (sk, 0))
+        .collect::<Vec<_>>();
+
+    test_observer::spawn();
+    test_observer::register(&mut naka_conf, &[EventKeyType::MinedBlocks]);
+
+    let mempool_db_path = format!(
+        "{}/nakamoto-neon/chainstate/mempool.sqlite",
+        naka_conf.node.working_dir
+    );
+
+    let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .expect("Failed starting bitcoind");
+    let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None);
+    btc_regtest_controller.bootstrap_chain(201);
+
+    let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap();
+    let run_loop_stopper = run_loop.get_termination_switch();
+    let Counters {
+        blocks_processed, ..
+ } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, &counters); + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Open a sqlite DB at mempool_db_path so that we can quickly add + // transactions to the mempool. + let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 1_305_150, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 50_025, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if 
account.nonce < *nonce {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    })
+    .expect("Timed out waiting for second round of funding to be mined");
+
+    info!(
+        "Sending and mining second round of funding took {:?}",
+        timer.elapsed()
+    );
+
+    // Add the new senders to the list of senders
+    senders.extend(new_senders.iter().map(|sk| (sk, 0)));
+
+    info!("Pause mining and fill the mempool with the transfers");
+
+    // Pause block mining
+    TEST_MINE_STALL.set(true);
+
+    let db_tx = conn.transaction().unwrap();
+    let timer = Instant::now();
+
+    // Fill the mempool with the transfers
+    for _ in 0..25 {
+        for (sender_sk, nonce) in senders.iter_mut() {
+            let sender_addr = tests::to_addr(sender_sk);
+            let fee = set_fee();
+            assert!(fee >= 180 && fee <= 2000);
+            let transfer_tx = make_stacks_transfer(
+                sender_sk,
+                *nonce,
+                fee,
+                naka_conf.burnchain.chain_id,
+                &recipient,
+                1,
+            );
+            insert_tx_in_mempool(
+                &db_tx,
+                transfer_tx,
+                &sender_addr,
+                *nonce,
+                fee,
+                &tip.consensus_hash,
+                &tip.canonical_stacks_tip_hash,
+                tip.stacks_block_height,
+            );
+            *nonce += 1;
+        }
+    }
+    db_tx.commit().unwrap();
+
+    info!("Sending transfers took {:?}", timer.elapsed());
+
+    info!("Mining transfers");
+    let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len();
+
+    // Unpause block mining
+    TEST_MINE_STALL.set(false);
+
+    // Wait for the first block to be proposed.
+    wait_for(30, || {
+        let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len();
+        Ok(proposed_blocks > proposed_blocks_before)
+    })
+    .expect("Timed out waiting for first block to be mined");
+
+    let blocks = test_observer::get_mined_nakamoto_blocks();
+    let last_block = blocks.last().unwrap();
+    info!(
+        "First block contains {} transactions",
+        last_block.tx_events.len()
+    );
+    if strategy == MemPoolWalkStrategy::NextNonceWithHighestFeeRate {
+        assert!(last_block.tx_events.len() > 5000);
+    }
+
+    // Wait for the transfers to all be mined
+    wait_for(7200, || {
+        for (sender_sk, nonce) in senders.iter() {
+            let sender_addr = tests::to_addr(sender_sk);
+            let account = get_account(&http_origin, &sender_addr);
+            if account.nonce < *nonce {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    })
+    .expect("Timed out waiting for transfers to be mined");
+
+    info!("Mining transfers took {:?}", timer.elapsed());
+
+    coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper.store(false, Ordering::SeqCst);
+
+    run_loop_thread.join().unwrap();
+}
+
+#[test]
+#[ignore]
+fn large_mempool_original_constant_fee() {
+    large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || 180);
+}
+
+#[test]
+#[ignore]
+fn large_mempool_original_random_fee() {
+    large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || {
+        thread_rng().gen_range(180..2000)
+    });
+}
+
+#[test]
+#[ignore]
+fn large_mempool_next_constant_fee() {
+    large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || 180);
+}
+
+#[test]
+#[ignore]
+fn large_mempool_next_random_fee() {
+    large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || {
+        thread_rng().gen_range(180..2000)
+    });
+}
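The balance derivations in the comments of `large_mempool_base` above (and the variant in `larger_mempool` below) are easy to sanity-check mechanically. A small standalone check of the arithmetic, assuming the worst-case fee of 2000 uSTX plus the 1 uSTX sent per transfer:

```rust
/// Recompute the funding amounts used by large_mempool_base.
fn main() {
    let max_fee = 2000u64; // worst-case fee per send
    let send = 1u64; // each leaf account sends 1 uSTX per transfer
    let sends = 25u64; // 25 transfers per account

    // Leaf accounts: 25 sends of (1 uSTX + fee).
    let leaf = (send + max_fee) * sends;
    assert_eq!(leaf, 50_025);

    // Middle accounts: fund 25 leaves (leaf amount + 180 uSTX funding fee
    // each), plus their own 25 transfers.
    let middle = (leaf + 180) * sends + leaf;
    assert_eq!(middle, 1_305_150);

    // Initial accounts: fund 25 middle accounts, plus their own transfers.
    let initial = (middle + 180) * sends + middle;
    assert_eq!(initial, 33_938_400);

    // Account fan-out: 10 -> 10 + 10 * 25 = 260 -> 260 + 260 * 25 = 6760.
    let total_accounts = 260 + 260 * sends;
    assert_eq!(total_accounts, 6760);
}
```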
+
+#[test]
+#[ignore]
+/// This test checks the timing of the mempool iteration when there
+/// is a large number of transactions in the mempool. It boots to epoch 3,
+/// fans out some STX transfers to a large number of accounts, waits for these to
+/// all be mined, then pauses block mining and submits a large number of
+/// transactions to the mempool from those accounts, with random fees between
+/// the minimum allowed fee of 180 uSTX and 2000 uSTX. It then unpauses
+/// block mining and checks how long it takes for the miner to mine the first
+/// block, and how long it takes to empty the mempool.
+fn larger_mempool() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
+    naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+    naka_conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate;
+
+    let sender_signer_sk = Secp256k1PrivateKey::random();
+    let sender_signer_addr = tests::to_addr(&sender_signer_sk);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
+    let stacker_sk = setup_stacker(&mut naka_conf);
+    let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
+
+    let transfer_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // Start with 10 accounts with initial balances.
+    let initial_sender_sks = (0..10)
+        .map(|_| StacksPrivateKey::random())
+        .collect::<Vec<_>>();
+    let initial_sender_addrs = initial_sender_sks
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect::<Vec<_>>();
+
+    // These 10 accounts will send to 25 accounts each, then those 260 accounts
+    // will send to 25 accounts each, for a total of 6760 accounts.
+    // At the end of the funding round, we want to have 6760 accounts with
+    // enough balance to send 1 uSTX 25 times in each of 10 rounds of sends.
+    // With a fee of up to 2000 uSTX per send, we need each account to end up
+    // with 2001 * 25 * 10 = 500_250 uSTX.
+    // The 260 accounts in the middle will need to have
+    // (500250 + 180) * 26 = 13_011_180 uSTX.
+    // The 10 initial accounts will need to have
+    // (13011180 + 180) * 26 = 338_295_360 uSTX.
+    let initial_balance = 338_295_360;
+    for addr in initial_sender_addrs.iter() {
+        naka_conf.add_initial_balance(PrincipalData::from(*addr).to_string(), initial_balance);
+    }
+    // This will hold tuples for all of our senders, with the sender pk and
+    // the nonce
+    let mut senders = initial_sender_sks
+        .iter()
+        .map(|sk| (sk, 0))
+        .collect::<Vec<_>>();
+
+    test_observer::spawn();
+    test_observer::register_any(&mut naka_conf);
+
+    let mempool_db_path = format!(
+        "{}/nakamoto-neon/chainstate/mempool.sqlite",
+        naka_conf.node.working_dir
+    );
+
+    let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .expect("Failed starting bitcoind");
+    let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None);
+    btc_regtest_controller.bootstrap_chain(201);
+
+    let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap();
+    let run_loop_stopper = run_loop.get_termination_switch();
+    let Counters {
+        blocks_processed,
+        naka_proposed_blocks,
+        ..
+ } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, &counters); + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Open a sqlite DB at mempool_db_path so that we can quickly add + // transactions to the mempool. + let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 13_011_180, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 500_250, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if 
account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let timer = Instant::now(); + + // Fill the mempool with the transfers + for _ in 0..10 { + let db_tx = conn.transaction().unwrap(); + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let fee = thread_rng().gen_range(180..2000); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + fee, + naka_conf.burnchain.chain_id, + &recipient, + 1, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + } + } + db_tx.commit().unwrap(); + } + + info!("Sending transfers took {:?}", timer.elapsed()); + + let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); + + info!("Mining transfers"); + + let timer = Instant::now(); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(10, || { + let blocks_proposed = naka_proposed_blocks.load(Ordering::SeqCst); + Ok(blocks_proposed > blocks_proposed_before) + }) + .expect("Timed out waiting for first block to be mined"); + + info!("Mining first block of transfers took {:?}", timer.elapsed()); + + // Wait for the transfers to all be mined + wait_for(7200, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for transfers to be mined"); + + info!("Mining transfers took {:?}", timer.elapsed()); coord_channel .lock() diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 68b8474efb3..528c3110327 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -11,7 +11,6 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH}; -use rand::Rng; use rusqlite::params; use serde::Deserialize; use serde_json::json; @@ -30,18 +29,21 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::POX_4_NAME; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{ - signal_mining_blocked, signal_mining_ready, TransactionErrorEvent, TransactionEvent, - TransactionSuccessEvent, + TransactionErrorEvent, TransactionEvent, TransactionSuccessEvent, }; use stacks::chainstate::stacks::{ - StacksBlock, StacksBlockHeader, StacksMicroblock, StacksMicroblockHeader, StacksPrivateKey, - StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, + StacksBlock, StacksBlockHeader, StacksMicroblock, StacksPrivateKey, StacksPublicKey, + StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; use 
stacks::cli;
 use stacks::codec::StacksMessageCodec;
 use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance};
 use stacks::core::mempool::MemPoolWalkTxTypes;
+use stacks::core::test_util::{
+    make_contract_call, make_contract_publish, make_contract_publish_microblock_only,
+    make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr,
+};
 use stacks::core::{
     self, EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205,
     BLOCK_LIMIT_MAINNET_21, CHAIN_ID_TESTNET, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0,
@@ -78,11 +80,7 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
 use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, sleep_ms};
 
 use super::bitcoin_regtest::BitcoinCoreController;
-use super::{
-    make_contract_call, make_contract_publish, make_contract_publish_microblock_only,
-    make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr, ADDR_4, SK_1,
-    SK_2, SK_3,
-};
+use super::{ADDR_4, SK_1, SK_2, SK_3};
 use crate::burnchains::bitcoin_regtest_controller::{self, addr2str, BitcoinRPCRequest, UTXO};
 use crate::neon_node::RelayerThread;
 use crate::operations::BurnchainOpSigner;
@@ -90,8 +88,6 @@ use crate::stacks_common::types::PrivateKey;
 use crate::syncctl::PoxSyncWatchdogComms;
 use crate::tests::gen_random_port;
 use crate::tests::nakamoto_integrations::{get_key_for_cycle, wait_for};
-use crate::util::hash::{MerkleTree, Sha512Trunc256Sum};
-use crate::util::secp256k1::MessageSignature;
 use crate::{neon, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain};
 
 fn inner_neon_integration_test_conf(seed: Option<Vec<u8>>) -> (Config, StacksAddress) {
@@ -1574,76 +1570,6 @@ fn deep_contract() {
     test_observer::clear();
 }
 
-#[test]
-#[ignore]
-fn bad_microblock_pubkey() {
-    if env::var("BITCOIND_TEST") != Ok("1".into()) {
-        return;
-    }
-
-    let (mut conf, _miner_account) = neon_integration_test_conf();
-
-    test_observer::spawn();
-    test_observer::register_any(&mut conf);
-
-    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
-    btcd_controller
-        .start_bitcoind()
-        .expect("Failed starting bitcoind");
-
-    let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path());
-
-    let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain(
-        conf.clone(),
-        None,
-        Some(burnchain_config.clone()),
-        None,
-    );
-
-    btc_regtest_controller.bootstrap_chain(201);
-
-    eprintln!("Chain bootstrapped...");
-
-    let mut run_loop = neon::RunLoop::new(conf.clone());
-    let blocks_processed = run_loop.get_blocks_processed_arc();
-    let channel = run_loop.get_coordinator_channel().unwrap();
-
-    thread::spawn(move || run_loop.start(Some(burnchain_config), 0));
-
-    // give the run loop some time to start up!
-    wait_for_runloop(&blocks_processed);
-
-    // first block wakes up the run loop
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // first block will hold our VRF registration
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // second block will be the first mined Stacks block
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // fault injection
-    env::set_var(
-        "STACKS_MICROBLOCK_PUBKEY_HASH",
-        "0000000000000000000000000000000000000000",
-    );
-    for _i in 0..10 {
-        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    }
-    env::set_var("STACKS_MICROBLOCK_PUBKEY_HASH", "");
-
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    let blocks = test_observer::get_blocks();
-    assert!(blocks.len() >= 5);
-    assert!(blocks.len() <= 6);
-
-    channel.stop_chains_coordinator();
-    test_observer::clear();
-}
-
 #[test]
 #[ignore]
 fn liquid_ustx_integration() {
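The helpers used throughout these tests (`wait_for`, `wait_for_runloop`, `next_block_and_wait`) all follow the same poll-until-deadline shape: retry a condition on an interval and fail the test with a message once a timeout elapses. A rough sketch of that shape (the real `wait_for` lives in `tests::nakamoto_integrations`; the polling interval and error text here are illustrative):

```rust
use std::thread;
use std::time::{Duration, Instant};

/// Poll `check` every 100ms until it returns Ok(true) or `timeout_secs`
/// elapses. An Err from `check` aborts immediately, mirroring how the
/// closures above return Ok(bool) for "keep waiting" vs. "done".
fn wait_for_sketch<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
where
    F: FnMut() -> Result<bool, String>,
{
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if check()? {
            return Ok(());
        }
        thread::sleep(Duration::from_millis(100));
    }
    Err("Timed out waiting for check to pass".into())
}
```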
@@ -3386,71 +3312,53 @@ fn should_fix_2771() {
     channel.stop_chains_coordinator();
 }
 
-/// Returns a StacksMicroblock with the given transactions, sequence, and parent block that is
-/// signed with the given private key.
-fn make_signed_microblock(
-    block_privk: &StacksPrivateKey,
-    txs: Vec<StacksTransaction>,
-    parent_block: BlockHeaderHash,
-    seq: u16,
-) -> StacksMicroblock {
-    let mut rng = rand::thread_rng();
-
-    let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect();
-    let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs);
-    let tx_merkle_root = merkle_tree.root();
-
-    let mut mblock = StacksMicroblock {
-        header: StacksMicroblockHeader {
-            version: rng.gen(),
-            sequence: seq,
-            prev_block: parent_block,
-            tx_merkle_root,
-            signature: MessageSignature([0u8; 65]),
-        },
-        txs,
-    };
-    mblock.sign(block_privk).unwrap();
-    mblock
-}
-
 #[test]
 #[ignore]
-fn microblock_fork_poison_integration_test() {
+fn filter_low_fee_tx_integration_test() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
 
-    let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap();
-    let spender_addr: PrincipalData = to_addr(&spender_sk).into();
-    let second_spender_sk = StacksPrivateKey::from_hex(SK_2).unwrap();
-    let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into();
+    let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect();
+    let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect();
 
     let (mut conf, _) = neon_integration_test_conf();
+    for spender_addr in spender_addrs.iter() {
+        conf.initial_balances.push(InitialBalance {
+            address: spender_addr.clone(),
+            amount: 1049230,
+        });
+    }
 
-    conf.initial_balances.push(InitialBalance {
-        address: spender_addr.clone(),
-        amount: 100300,
-    });
-    conf.initial_balances.push(InitialBalance {
-        address: second_spender_addr.clone(),
-        amount: 10000,
-    });
-
-    // we'll manually post a forked stream to the node
-    conf.node.mine_microblocks = false;
-    conf.burnchain.max_rbf = 1000000;
-    conf.node.wait_time_for_microblocks = 0;
-    conf.node.microblock_frequency = 1_000;
-    conf.miner.first_attempt_time_ms = 2_000;
-    conf.miner.subsequent_attempt_time_ms = 5_000;
-    conf.node.wait_time_for_blocks = 1_000;
-
-    conf.miner.first_attempt_time_ms = i64::MAX as u64;
-    conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
+    let txs: Vec<_> = spender_sks
+        .iter()
+        .enumerate()
+        .map(|(ix, spender_sk)| {
+            let
recipient = StacksAddress::from_string(ADDR_4).unwrap(); - test_observer::spawn(); - test_observer::register_any(&mut conf); + if ix < 5 { + // low-fee + make_stacks_transfer( + spender_sk, + 0, + 1000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) + } else { + // high-fee + make_stacks_transfer( + spender_sk, + 0, + 2000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) + } + }) + .collect(); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -3464,10 +3372,8 @@ fn microblock_fork_poison_integration_test() { eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf.clone()); + let mut run_loop = neon::RunLoop::new(conf); let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); - let miner_status = run_loop.get_miner_status(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -3484,219 +3390,169 @@ fn microblock_fork_poison_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); - // turn off the miner for now, so we can ensure both of these get accepted and preprocessed - // before we try and mine an anchor block that confirms them - eprintln!("Disable miner"); - signal_mining_blocked(miner_status.clone()); - sleep_ms(10_000); + for tx in txs.iter() { + submit_tx(&http_origin, tx); + } - // our first spender - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.balance, 100300); - assert_eq!(account.nonce, 0); + // mine a couple more blocks + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // our second spender - let account = get_account(&http_origin, &second_spender_addr); - assert_eq!(account.balance, 10000); - assert_eq!(account.nonce, 0); + // First five accounts have a transaction. The miner will consider low fee transactions, + // but rank by estimated fee rate. 
+    // The last five accounts' transactions are mined as well.
+    for spender_addr in &spender_addrs {
+        let account = get_account(&http_origin, spender_addr);
+        assert_eq!(account.nonce, 1);
+    }
 
-    info!("Test microblock");
+    channel.stop_chains_coordinator();
+}
 
-    let recipient = StacksAddress::from_string(ADDR_4).unwrap();
-    let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only(
-        &spender_sk,
-        0,
-        1000,
-        conf.burnchain.chain_id,
-        &recipient.into(),
-        1000,
-    );
-    let unconfirmed_tx =
-        StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]).unwrap();
-    let second_unconfirmed_tx_bytes = make_stacks_transfer_mblock_only(
-        &second_spender_sk,
-        0,
-        1000,
-        conf.burnchain.chain_id,
-        &recipient.into(),
-        1500,
-    );
-    let second_unconfirmed_tx =
-        StacksTransaction::consensus_deserialize(&mut &second_unconfirmed_tx_bytes[..]).unwrap();
+#[test]
+#[ignore]
+fn filter_long_runtime_tx_integration_test() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
 
-    // TODO (hack) instantiate the sortdb in the burnchain
-    let _ = btc_regtest_controller.sortdb_mut();
+    let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect();
+    let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect();
 
-    // put each into a microblock
-    let (first_microblock, second_microblock) = {
-        let tip_info = get_chain_info(&conf);
-        let stacks_tip = tip_info.stacks_tip;
+    let (mut conf, _) = neon_integration_test_conf();
+    for spender_addr in spender_addrs.iter() {
+        conf.initial_balances.push(InitialBalance {
+            address: spender_addr.clone(),
+            amount: 1049230,
+        });
+    }
 
-        let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf);
-        let tip_hash =
-            StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash());
-        let privk =
-            find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024)
-                .unwrap();
-        let (mut chainstate, _) = StacksChainState::open(
-            false,
-            CHAIN_ID_TESTNET,
-            &conf.get_chainstate_path_str(),
-            None,
-        )
-        .unwrap();
+    // ...but none of them will be mined since we allot zero ms to do so
+    conf.miner.first_attempt_time_ms = 0;
+    conf.miner.subsequent_attempt_time_ms = 0;
 
-        chainstate
-            .reload_unconfirmed_state(
-                &btc_regtest_controller
-                    .sortdb_ref()
-                    .index_handle_at_block(&chainstate, &tip_hash)
-                    .unwrap(),
-                tip_hash,
+    let txs: Vec<_> = spender_sks
+        .iter()
+        .enumerate()
+        .map(|(ix, spender_sk)| {
+            let recipient = StacksAddress::from_string(ADDR_4).unwrap();
+            make_stacks_transfer(
+                spender_sk,
+                0,
+                1000 + (ix as u64),
+                conf.burnchain.chain_id,
+                &recipient.into(),
+                1000,
             )
-            .unwrap();
-        let iconn = btc_regtest_controller
-            .sortdb_ref()
-            .index_handle_at_block(&chainstate, &tip_hash)
-            .unwrap();
-        let first_microblock = make_microblock(
-            &privk,
-            &mut chainstate,
-            &iconn,
-            consensus_hash,
-            stacks_block,
-            vec![unconfirmed_tx],
-        );
+        })
+        .collect();
 
-        eprintln!(
-            "Created first microblock: {}: {first_microblock:?}",
-            &first_microblock.block_hash()
-        );
+    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .expect("Failed starting bitcoind");
 
-        // NOTE: this microblock conflicts because it has the same parent as the first microblock,
-        // even though its seq is different.
- let second_microblock = - make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); - eprintln!( - "Created second conflicting microblock: {}: {second_microblock:?}", - &second_microblock.block_hash() - ); - (first_microblock, second_microblock) - }; + btc_regtest_controller.bootstrap_chain(201); - let mut microblock_bytes = vec![]; - first_microblock - .consensus_serialize(&mut microblock_bytes) - .unwrap(); + eprintln!("Chain bootstrapped..."); - // post the first microblock - let path = format!("{http_origin}/v2/microblocks"); - let res: String = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(microblock_bytes.clone()) - .send() - .unwrap() - .json() - .unwrap(); + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); - assert_eq!(res, format!("{}", &first_microblock.block_hash())); + let channel = run_loop.get_coordinator_channel().unwrap(); - let mut second_microblock_bytes = vec![]; - second_microblock - .consensus_serialize(&mut second_microblock_bytes) - .unwrap(); + thread::spawn(move || run_loop.start(None, 0)); - // post the second microblock - let path = format!("{http_origin}/v2/microblocks"); - let res: String = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(second_microblock_bytes.clone()) - .send() - .unwrap() - .json() - .unwrap(); + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); - assert_eq!(res, format!("{}", &second_microblock.block_hash())); + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - eprintln!("Wait 10s and re-enable miner"); - sleep_ms(10_000); + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // resume mining - eprintln!("Enable miner"); - signal_mining_ready(miner_status); - sleep_ms(10_000); + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - eprintln!("Attempt to mine poison-microblock"); - let mut found = false; - for _i in 0..10 { - if found { - break; - } - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let blocks = test_observer::get_blocks(); - for block in blocks.iter() { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - - if let TransactionPayload::PoisonMicroblock(..) 
= &parsed.payload {
-                    found = true;
-                    break;
-                }
-            }
-        }
+    for tx in txs.iter() {
+        submit_tx(&http_origin, tx);
     }
 
-    assert!(
-        found,
-        "Did not find poison microblock tx in any mined block"
-    );
+    // mine a couple more blocks
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    // no transactions mined
+    for spender_addr in &spender_addrs {
+        let account = get_account(&http_origin, &spender_addr);
+        assert_eq!(account.nonce, 0);
+    }
 
-    test_observer::clear();
     channel.stop_chains_coordinator();
 }
 
 #[test]
 #[ignore]
-fn microblock_integration_test() {
+fn miner_submit_twice() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
 
-    let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap();
+    let spender_sk = StacksPrivateKey::random();
     let spender_addr: PrincipalData = to_addr(&spender_sk).into();
-    let second_spender_sk = StacksPrivateKey::from_hex(SK_2).unwrap();
-    let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into();
-
-    let (mut conf, miner_account) = neon_integration_test_conf();
+    let contract_content = "
+    (define-public (foo (a int))
+        (ok (* 2 (+ a 1))))
+    (define-private (bar)
+        (foo 56))
+    ";
 
-    conf.miner.wait_for_block_download = false;
+    let (mut conf, _) = neon_integration_test_conf();
 
     conf.initial_balances.push(InitialBalance {
         address: spender_addr.clone(),
-        amount: 100300,
-    });
-    conf.initial_balances.push(InitialBalance {
-        address: second_spender_addr.clone(),
-        amount: 10000,
+        amount: 1049230,
     });
 
-    conf.node.mine_microblocks = true;
-    conf.node.microblock_frequency = 1_000;
-    conf.miner.microblock_attempt_time_ms = 1_000;
-    conf.node.wait_time_for_microblocks = 0;
+    conf.node.mine_microblocks = false;
+    // one should be mined in the first attempt, and two should be in the second attempt
+    conf.miner.first_attempt_time_ms = 20;
+    conf.miner.subsequent_attempt_time_ms = 30_000;
 
-    test_observer::spawn();
-    test_observer::register_any(&mut conf);
+    let tx_1 = make_contract_publish(
+        &spender_sk,
+        0,
+        50_000,
+        conf.burnchain.chain_id,
+        "first-contract",
+        contract_content,
+    );
+    let tx_2 = make_contract_publish(
+        &spender_sk,
+        1,
+        50_000,
+        conf.burnchain.chain_id,
+        "second-contract",
+        contract_content,
+    );
+
+    // note: this test depends on the timing of how long it takes to assemble a block,
+    // but it won't flake if the miner behaves correctly: a correct miner should
+    // always be able to mine both transactions by the end of this test. an incorrect
+    // miner may sometimes pass this test though, if they can successfully mine a
+    // 2-transaction block in 20 ms *OR* if they are slow enough that they mine a
+    // 0-transaction block in that time (because this would trigger a re-attempt, which
+    // is exactly what this test is measuring).
+    //
+    // The "fixed" behavior is the corner case where a miner did a "first attempt", which
+    // included one or more transactions, but could still have made a second attempt with
+    // more transactions.
 
     let mut btcd_controller = BitcoinCoreController::new(conf.clone());
     btcd_controller
@@ -3710,9 +3566,8 @@ fn microblock_integration_test() {
 
     eprintln!("Chain bootstrapped...");
 
-    let mut run_loop = neon::RunLoop::new(conf.clone());
+    let mut run_loop = neon::RunLoop::new(conf);
     let blocks_processed = run_loop.get_blocks_processed_arc();
-    let client = reqwest::blocking::Client::new();
 
     let channel = run_loop.get_coordinator_channel().unwrap();
 
@@ -3730,510 +3585,208 @@ fn microblock_integration_test() {
     // second block will be the first mined Stacks block
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
 
-    // let's query the miner's account nonce:
-    info!("Miner account: {miner_account}");
-    let account = get_account(&http_origin, &miner_account);
-    assert_eq!(account.balance, 0);
-    assert_eq!(account.nonce, 1);
+    submit_tx(&http_origin, &tx_1);
+    submit_tx(&http_origin, &tx_2);
+
+    // mine a couple more blocks
+    // waiting enough time between them that a second attempt could be made.
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+    thread::sleep(Duration::from_secs(15));
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
 
-    // and our first spender
+    // both transactions have been mined
     let account = get_account(&http_origin, &spender_addr);
-    assert_eq!(account.balance, 100300);
-    assert_eq!(account.nonce, 0);
+    assert_eq!(account.nonce, 2);
 
-    // and our second spender
-    let account = get_account(&http_origin, &second_spender_addr);
-    assert_eq!(account.balance, 10000);
-    assert_eq!(account.nonce, 0);
+    channel.stop_chains_coordinator();
+}
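The long comment in `miner_submit_twice` above describes the behavior being pinned down: a first, quick assembly attempt (`first_attempt_time_ms = 20`) may leave transactions behind, and a correct miner should then use the much larger `subsequent_attempt_time_ms` budget to try again with a fuller block. A toy sketch of that decision rule (hypothetical struct and fields for illustration only, not the node's actual miner logic):

```rust
use std::time::Duration;

/// Hypothetical snapshot of a miner's assembly state between attempts.
struct AttemptState {
    first_attempt_done: bool,
    elapsed: Duration,
    txs_in_last_attempt: usize,
    txs_available: usize,
}

/// A correct miner re-assembles when it still has time budget and the
/// previous attempt left mineable transactions on the table.
fn should_reattempt(s: &AttemptState, subsequent_attempt_time: Duration) -> bool {
    s.first_attempt_done
        && s.elapsed < subsequent_attempt_time
        && s.txs_available > s.txs_in_last_attempt
}
```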
-    // okay, let's push a transaction that is marked microblock only!
-    let recipient = StacksAddress::from_string(ADDR_4).unwrap();
-    let tx = make_stacks_transfer_mblock_only(
-        &spender_sk,
-        0,
-        1000,
-        conf.burnchain.chain_id,
-        &recipient.into(),
-        1000,
-    );
-    submit_tx(&http_origin, &tx);
+#[test]
+#[ignore]
+fn size_check_integration_test() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
 
-    info!("Try to mine a microblock-only tx");
+    let mut giant_contract = "(define-public (f) (ok 1))".to_string();
+    for _i in 0..(1024 * 1024 + 500) {
+        giant_contract.push(' ');
+    }
 
-    // now let's mine a couple blocks, and then check the sender's nonce.
-    // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block.
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    sleep_ms(10_000);
+    let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect();
+    let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect();
 
-    // this one will contain the sortition from above anchor block,
-    // which *should* have also confirmed the microblock.
-    info!("Wait for second block");
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    sleep_ms(10_000);
+    let (mut conf, miner_account) = neon_integration_test_conf();
 
-    // I guess let's push another block for good measure?
-    info!("Wait for third block");
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    sleep_ms(10_000);
+    // make a bunch of txs that will only fit one per block.
+ let txs: Vec<_> = spender_sks + .iter() + .enumerate() + .map(|(ix, spender_sk)| { + if ix % 2 == 0 { + make_contract_publish( + spender_sk, + 0, + 1049230, + conf.burnchain.chain_id, + "large-0", + &giant_contract, + ) + } else { + let tx = make_contract_publish_microblock_only( + spender_sk, + 0, + 1049230, + conf.burnchain.chain_id, + "large-0", + &giant_contract, + ); + let parsed_tx = StacksTransaction::consensus_deserialize(&mut &tx[..]).unwrap(); + debug!("Mine transaction {} in a microblock", &parsed_tx.txid()); + tx + } + }) + .collect(); - info!("Test microblock"); + for spender_addr in spender_addrs.iter() { + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 1049230, + }); + } - // microblock must have bumped our nonce - // and our spender - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 1); + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 5000; + conf.node.microblock_frequency = 5000; + conf.miner.microblock_attempt_time_ms = 120_000; - // push another two transactions that are marked microblock only - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( - &spender_sk, - 1, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ); - let unconfirmed_tx = - StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]).unwrap(); - let second_unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( - &second_spender_sk, - 0, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1500, - ); - let second_unconfirmed_tx = - StacksTransaction::consensus_deserialize(&mut &second_unconfirmed_tx_bytes[..]).unwrap(); + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - // TODO (hack) instantiate the sortdb in the burnchain - let _ = btc_regtest_controller.sortdb_mut(); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); - // put each into a microblock - let (first_microblock, second_microblock) = { - let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf); - let tip_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); - let privk = - find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024) - .unwrap(); - let (mut chainstate, _) = StacksChainState::open( - false, - CHAIN_ID_TESTNET, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); - chainstate - .reload_unconfirmed_state( - &btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(), - tip_hash, - ) - .unwrap(); - let iconn = btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(); - let first_microblock = make_microblock( - &privk, - &mut chainstate, - &iconn, - consensus_hash, - stacks_block, - vec![unconfirmed_tx], - ); + btc_regtest_controller.bootstrap_chain(201); - eprintln!( - "Created first microblock: {}: {first_microblock:?}", - &first_microblock.block_hash() - ); - /* - let second_microblock = - make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); - */ - let second_microblock = make_signed_microblock( - &privk, - 
vec![second_unconfirmed_tx], - first_microblock.block_hash(), - 1, - ); - eprintln!( - "Created second microblock: {}: {second_microblock:?}", - &second_microblock.block_hash() - ); - (first_microblock, second_microblock) - }; + eprintln!("Chain bootstrapped..."); - let mut microblock_bytes = vec![]; - first_microblock - .consensus_serialize(&mut microblock_bytes) - .unwrap(); + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); - // post the first microblock - let path = format!("{http_origin}/v2/microblocks"); - let res: String = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(microblock_bytes.clone()) - .send() - .unwrap() - .json() - .unwrap(); + let channel = run_loop.get_coordinator_channel().unwrap(); - assert_eq!(res, format!("{}", &first_microblock.block_hash())); + thread::spawn(move || run_loop.start(None, 0)); - eprintln!("\n\nBegin testing\nmicroblock: {first_microblock:?}\n\n"); + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 98300); + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut second_microblock_bytes = vec![]; - second_microblock - .consensus_serialize(&mut second_microblock_bytes) - .unwrap(); + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // post the second microblock - let path = format!("{http_origin}/v2/microblocks"); - let res: String = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(second_microblock_bytes.clone()) - .send() - .unwrap() - .json() - .unwrap(); + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - assert_eq!(res, format!("{}", &second_microblock.block_hash())); + // let's query the miner's account nonce: + let account = get_account(&http_origin, &miner_account); + assert_eq!(account.nonce, 1); + assert_eq!(account.balance, 0); + // and our potential spenders: - sleep_ms(5_000); - - let mut iter_count = 0; - let tip_info = loop { - let tip_info = get_chain_info(&conf); - eprintln!("{:#?}", tip_info); - match tip_info.unanchored_tip { - None => { - iter_count += 1; - assert!( - iter_count < 10, - "Hit retry count while waiting for net module to process pushed microblock" - ); - sleep_ms(5_000); - continue; - } - Some(_tip) => break tip_info, - } - }; - - assert!(tip_info.stacks_tip_height >= 3); - let stacks_tip = tip_info.stacks_tip; - let stacks_tip_consensus_hash = tip_info.stacks_tip_consensus_hash; - let stacks_id_tip = - StacksBlockHeader::make_index_block_hash(&stacks_tip_consensus_hash, &stacks_tip); - - // todo - pipe in the PoxSyncWatchdog to the RunLoop struct to avoid flakiness here - // wait at least two p2p refreshes so it can produce the microblock - for i in 0..30 { - info!( - "wait {} more seconds for microblock miner to find our transaction...", - 30 - i - ); - sleep_ms(1000); - } - - // check event observer for new microblock event (expect at least 2) - let mut microblock_events = test_observer::get_microblocks(); - assert!(microblock_events.len() >= 2); - - // this microblock should correspond to `second_microblock` - let microblock = microblock_events.pop().unwrap(); - let transactions = 
microblock.get("transactions").unwrap().as_array().unwrap(); - assert_eq!(transactions.len(), 1); - let tx_sequence = transactions[0] - .get("microblock_sequence") - .unwrap() - .as_u64() - .unwrap(); - assert_eq!(tx_sequence, 1); - let microblock_hash = transactions[0] - .get("microblock_hash") - .unwrap() - .as_str() - .unwrap(); - assert_eq!( - microblock_hash[2..], - format!("{}", second_microblock.header.block_hash()) - ); - let microblock_associated_hash = microblock - .get("parent_index_block_hash") - .unwrap() - .as_str() - .unwrap(); - let index_block_hash_bytes = hex_bytes(µblock_associated_hash[2..]).unwrap(); - assert_eq!( - StacksBlockId::from_vec(&index_block_hash_bytes), - Some(stacks_id_tip) - ); - // make sure we have stats for the burn block - let _burn_block_hash = microblock.get("burn_block_hash").unwrap().as_str().unwrap(); - let _burn_block_height = microblock - .get("burn_block_height") - .unwrap() - .as_u64() - .unwrap(); - let _burn_block_timestamp = microblock - .get("burn_block_timestamp") - .unwrap() - .as_u64() - .unwrap(); - - // this microblock should correspond to the first microblock that was posted - let microblock = microblock_events.pop().unwrap(); - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); - assert_eq!(transactions.len(), 1); - let tx_sequence = transactions[0] - .get("microblock_sequence") - .unwrap() - .as_u64() - .unwrap(); - assert_eq!(tx_sequence, 0); - - // check mempool tx events - let memtx_events = test_observer::get_memtxs(); - assert_eq!(memtx_events.len(), 1); - assert_eq!(&memtx_events[0], &format!("0x{}", &bytes_to_hex(&tx))); - - // let's make sure the returned blocks all point at each other. - let blocks_observed = test_observer::get_blocks(); - // we at least mined 5 blocks - assert!( - blocks_observed.len() >= 3, - "Blocks observed {} should be >= 3", - blocks_observed.len() - ); - assert_eq!(blocks_observed.len() as u64, tip_info.stacks_tip_height + 1); - - let burn_blocks_observed = test_observer::get_burn_blocks(); - let burn_blocks_with_burns: Vec<_> = burn_blocks_observed - .into_iter() - .filter(|block| block.get("burn_amount").unwrap().as_u64().unwrap() > 0) - .collect(); - assert!( - burn_blocks_with_burns.len() >= 3, - "Burn block sortitions {} should be >= 3", - burn_blocks_with_burns.len() - ); - for burn_block in burn_blocks_with_burns { - eprintln!("{burn_block}"); + for spender_addr in spender_addrs.iter() { + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.nonce, 0); + assert_eq!(account.balance, 1049230); } - let mut prior = None; - for block in blocks_observed.iter() { - let parent_index_hash = block - .get("parent_index_block_hash") - .unwrap() - .as_str() - .unwrap() - .to_string(); - let my_index_hash = block - .get("index_block_hash") - .unwrap() - .as_str() - .unwrap() - .to_string(); - if let Some(ref previous_index_hash) = prior { - assert_eq!(&parent_index_hash, previous_index_hash); - } - - // make sure we have a burn_block_hash, burn_block_height and miner_txid - - let _burn_block_hash = block.get("burn_block_hash").unwrap().as_str().unwrap(); - - let _burn_block_height = block.get("burn_block_height").unwrap().as_u64().unwrap(); - - let _miner_txid = block.get("miner_txid").unwrap().as_str().unwrap(); - - // make sure we have stats for the previous burn block - let _parent_burn_block_hash = block - .get("parent_burn_block_hash") - .unwrap() - .as_str() - .unwrap(); - - let _parent_burn_block_height = block - .get("parent_burn_block_height") 
- .unwrap() - .as_u64() - .unwrap(); - - let _parent_burn_block_timestamp = block - .get("parent_burn_block_timestamp") - .unwrap() - .as_u64() - .unwrap(); - - prior = Some(my_index_hash); + for tx in txs.iter() { + // okay, let's push a bunch of transactions that can only fit one per block! + submit_tx(&http_origin, tx); } - // we can query unconfirmed state from the microblock we announced - let path = format!( - "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", - &tip_info.unanchored_tip.unwrap() - ); - - eprintln!("{path:?}"); + let mut micro_block_txs = 0; + let mut anchor_block_txs = 0; - let mut iter_count = 0; - let res = loop { - let http_resp = client.get(&path).send().unwrap(); + for i in 0..100 { + // now let's mine a couple blocks, and then check the sender's nonce. + // at the end of mining three blocks, there should be _at least one_ transaction from the microblock + // only set that got mined (since the block before this one was empty, a microblock can + // be added), + // and a number of transactions from equal to the number anchor blocks will get mined. + // + // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // this one will contain the sortition from above anchor block, + // which *should* have also confirmed the microblock. + sleep_ms(10_000 * i); - info!("{:?}", http_resp); + micro_block_txs = 0; + anchor_block_txs = 0; - match http_resp.json::() { - Ok(x) => break x, - Err(e) => { - warn!("Failed to query {path}; will try again. Err = {e:?}"); - iter_count += 1; - assert!(iter_count < 10, "Retry limit reached querying account"); - sleep_ms(1000); - continue; + // let's figure out how many micro-only and anchor-only txs got accepted + // by examining our account nonces: + for (ix, spender_addr) in spender_addrs.iter().enumerate() { + let res = get_account(&http_origin, &spender_addr); + if res.nonce == 1 { + if ix % 2 == 0 { + anchor_block_txs += 1; + } else { + micro_block_txs += 1; + } + } else if res.nonce != 0 { + panic!("Spender address nonce incremented past 1"); } - }; - }; - - info!("Account Response = {res:#?}"); - assert_eq!(res.nonce, 2); - assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 96300); - - // limited by chaining - for next_nonce in 2..5 { - // verify that the microblock miner can automatically pick up transactions - debug!("Try to send unconfirmed tx from {spender_addr} to {recipient} nonce {next_nonce}"); - let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( - &spender_sk, - next_nonce, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(unconfirmed_tx_bytes.clone()) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]) - .unwrap() - .txid() - .to_string() - ); - eprintln!("Sent {res}"); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - // wait at least two p2p refreshes - // so it can produce the microblock - for i in 0..30 { - debug!( - "wait {} more seconds for microblock miner to find our transaction...", - 30 - i - ); - sleep_ms(1000); + debug!("Spender {ix},{spender_addr}: {res:?}"); } - // we can query _new_ unconfirmed state from the microblock we 
announced - let path = format!( - "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", - &tip_info.unanchored_tip.unwrap() - ); - - let res_text = client.get(&path).send().unwrap().text().unwrap(); - - eprintln!("text of {path}\n{res_text}"); - - let res = client - .get(&path) - .send() - .unwrap() - .json::() - .unwrap(); - eprintln!("{path:?}"); - eprintln!("{res:#?}"); + eprintln!("anchor_block_txs: {anchor_block_txs}, micro_block_txs: {micro_block_txs}"); - // advanced! - assert_eq!(res.nonce, next_nonce + 1); - assert_eq!( - u128::from_str_radix(&res.balance[2..], 16).unwrap(), - (96300 - 2000 * (next_nonce - 1)) as u128 - ); + if anchor_block_txs >= 2 && micro_block_txs >= 2 { + break; + } } + assert!(anchor_block_txs >= 2); + assert!(micro_block_txs >= 2); + test_observer::clear(); channel.stop_chains_coordinator(); } #[test] #[ignore] -fn filter_low_fee_tx_integration_test() { +fn block_replay_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let (mut conf, _) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } + let (mut conf, miner_account) = neon_integration_test_conf(); - let txs: Vec<_> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 100300, + }); - if ix < 5 { - // low-fee - make_stacks_transfer( - spender_sk, - 0, - 1000 + (ix as u64), - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ) - } else { - // high-fee - make_stacks_transfer( - spender_sk, - 0, - 2000 + (ix as u64), - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ) - } - }) - .collect(); + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 30000; + conf.node.microblock_frequency = 5_000; + + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + + test_observer::spawn(); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -4247,8 +3800,9 @@ fn filter_low_fee_tx_integration_test() { eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf); + let mut run_loop = neon::RunLoop::new(conf.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); + let client = reqwest::blocking::Client::new(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -4266,187 +3820,139 @@ fn filter_low_fee_tx_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - for tx in txs.iter() { - submit_tx(&http_origin, tx); - } - - // mine a couple more blocks - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // let's query the miner's account nonce: - // First five accounts have a transaction. 
The miner will consider low fee transactions, - // but rank by estimated fee rate. - // Last five accounts have transaction - for spender_addr in &spender_addrs { - let account = get_account(&http_origin, spender_addr); - assert_eq!(account.nonce, 1); - } + info!("Miner account: {miner_account}"); + let account = get_account(&http_origin, &miner_account); + assert_eq!(account.balance, 0); + assert_eq!(account.nonce, 1); - channel.stop_chains_coordinator(); -} - -#[test] -#[ignore] -fn filter_long_runtime_tx_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - - let (mut conf, _) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } - - // ...but none of them will be mined since we allot zero ms to do so - conf.miner.first_attempt_time_ms = 0; - conf.miner.subsequent_attempt_time_ms = 0; - - let txs: Vec<_> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - make_stacks_transfer( - spender_sk, - 0, - 1000 + (ix as u64), - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ) - }) - .collect(); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); + // and our spender + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.balance, 100300); + assert_eq!(account.nonce, 0); - // give the run loop some time to start up! 
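// A minimal sketch of the account-polling pattern these integration tests use
// against the node's RPC interface, assuming `reqwest::blocking` and
// `serde_json` (both already in use in this file). `query_account_json` is a
// hypothetical helper, not part of the test suite; the tests themselves go
// through `get_account`. The `balance` field comes back as a 0x-prefixed hex
// string, which is why the removed code above parses it with
// `u128::from_str_radix(&res.balance[2..], 16)`.
fn query_account_json(http_origin: &str, principal: &str) -> (u64, u128) {
    // proof=0 skips the MARF merkle proof, which these tests never inspect
    let path = format!("{http_origin}/v2/accounts/{principal}?proof=0");
    let res: serde_json::Value = reqwest::blocking::get(path.as_str())
        .unwrap()
        .json()
        .unwrap();
    let nonce = res.get("nonce").unwrap().as_u64().unwrap();
    let balance_hex = res.get("balance").unwrap().as_str().unwrap();
    let balance = u128::from_str_radix(&balance_hex[2..], 16).unwrap();
    (nonce, balance)
}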
- wait_for_runloop(&blocks_processed); + let recipient = StacksAddress::from_string(ADDR_4).unwrap(); + let tx = make_stacks_transfer( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); + submit_tx(&http_origin, &tx); - // first block wakes up the run loop next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // first block will hold our VRF registration next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - for tx in txs.iter() { - submit_tx(&http_origin, tx); - } + // try and push the mined block back at the node lots of times + let (tip_consensus_hash, tip_block) = get_tip_anchored_block(&conf); + let mut tip_block_bytes = vec![]; + tip_block.consensus_serialize(&mut tip_block_bytes).unwrap(); - // mine a couple more blocks - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + for i in 0..1024 { + let path = format!("{http_origin}/v2/blocks/upload/{tip_consensus_hash}"); + let res_text = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tip_block_bytes.clone()) + .send() + .unwrap() + .text() + .unwrap(); - // no transactions mined - for spender_addr in &spender_addrs { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); + eprintln!("{i}: text of {path}\n{res_text}"); } + test_observer::clear(); channel.stop_chains_coordinator(); } #[test] #[ignore] -fn miner_submit_twice() { +fn cost_voting_integration() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let spender_sk = StacksPrivateKey::random(); - let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let contract_content = " - (define-public (foo (a int)) - (ok (* 2 (+ a 1)))) - (define-private (bar) - (foo 56)) + // let's make `<` free... + let cost_definer_src = " + (define-read-only (cost-definition-le (size uint)) + { + runtime: u0, write_length: u0, write_count: u0, read_count: u0, read_length: u0 + }) "; - let (mut conf, _) = neon_integration_test_conf(); - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); + // the contract that we'll test the costs of + let caller_src = " + (define-public (execute-2 (a uint)) + (ok (< a a))) + "; - conf.node.mine_microblocks = false; - // one should be mined in first attempt, and two should be in second attempt - conf.miner.first_attempt_time_ms = 20; - conf.miner.subsequent_attempt_time_ms = 30_000; + let power_vote_src = " + (define-public (propose-vote-confirm) + (let + ((proposal-id (unwrap-panic (contract-call? 'ST000000000000000000002AMW42H.cost-voting submit-proposal + 'ST000000000000000000002AMW42H.costs \"cost_le\" + .cost-definer \"cost-definition-le\"))) + (vote-amount (* u9000000000 u1000000))) + (try! (contract-call? 'ST000000000000000000002AMW42H.cost-voting vote-proposal proposal-id vote-amount)) + (try! (contract-call? 
'ST000000000000000000002AMW42H.cost-voting confirm-votes proposal-id)) + (ok proposal-id))) + "; - let tx_1 = make_contract_publish( - &spender_sk, - 0, - 50_000, - conf.burnchain.chain_id, - "first-contract", - contract_content, - ); - let tx_2 = make_contract_publish( - &spender_sk, - 1, - 50_000, - conf.burnchain.chain_id, - "second-contract", - contract_content, - ); + let spender_sk = StacksPrivateKey::random(); + let spender_addr = to_addr(&spender_sk); + let spender_princ: PrincipalData = spender_addr.into(); - // note: this test depends on timing of how long it takes to assemble a block, - // but it won't flake if the miner behaves correctly: a correct miner should - // always be able to mine both transactions by the end of this test. an incorrect - // miner may sometimes pass this test though, if they can successfully mine a - // 2-transaction block in 20 ms *OR* if they are slow enough that they mine a - // 0-transaction block in that time (because this would trigger a re-attempt, which - // is exactly what this test is measuring). - // - // The "fixed" behavior is the corner case where a miner did a "first attempt", which - // included 1 or more transaction, but they could have made a second attempt with - // more transactions. + let (mut conf, miner_account) = neon_integration_test_conf(); + + conf.miner.microblock_attempt_time_ms = 1_000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; + + test_observer::spawn(); + test_observer::register_any(&mut conf); + + let spender_bal = 10_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); + + conf.initial_balances.push(InitialBalance { + address: spender_princ.clone(), + amount: spender_bal, + }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); let http_origin = format!("http://{}", &conf.node.rpc_bind); btc_regtest_controller.bootstrap_chain(201); eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf); + let mut run_loop = neon::RunLoop::new(conf.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || run_loop.start(None, 0)); + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); // give the run loop some time to start up! wait_for_runloop(&blocks_processed); @@ -4460,257 +3966,327 @@ fn miner_submit_twice() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - submit_tx(&http_origin, &tx_1); - submit_tx(&http_origin, &tx_2); + // let's query the miner's account nonce: + let res = get_account(&http_origin, &miner_account); + assert_eq!(res.balance, 0); + assert_eq!(res.nonce, 1); - // mine a couple more blocks - // waiting enough time between them that a second attempt could be made. 
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - thread::sleep(Duration::from_secs(15)); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // and our spender: + let res = get_account(&http_origin, &spender_princ); + assert_eq!(res.balance, spender_bal as u128); + assert_eq!(res.nonce, 0); - // 1 transaction mined - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 2); + let transactions = vec![ + make_contract_publish( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + "cost-definer", + cost_definer_src, + ), + make_contract_publish( + &spender_sk, + 1, + 1000, + conf.burnchain.chain_id, + "caller", + caller_src, + ), + make_contract_publish( + &spender_sk, + 2, + 1000, + conf.burnchain.chain_id, + "voter", + power_vote_src, + ), + ]; - channel.stop_chains_coordinator(); -} + for tx in transactions.into_iter() { + submit_tx(&http_origin, &tx); + } -#[test] -#[ignore] -fn size_check_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let mut giant_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..(1024 * 1024 + 500) { - giant_contract.push(' '); - } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let vote_tx = make_contract_call( + &spender_sk, + 3, + 1000, + conf.burnchain.chain_id, + &spender_addr, + "voter", + "propose-vote-confirm", + &[], + ); - let (mut conf, miner_account) = neon_integration_test_conf(); + let call_le_tx = make_contract_call( + &spender_sk, + 4, + 1000, + conf.burnchain.chain_id, + &spender_addr, + "caller", + "execute-2", + &[Value::UInt(1)], + ); - // make a bunch of txs that will only fit one per block. 
- let txs: Vec<_> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - if ix % 2 == 0 { - make_contract_publish( - spender_sk, - 0, - 1049230, - conf.burnchain.chain_id, - "large-0", - &giant_contract, - ) - } else { - let tx = make_contract_publish_microblock_only( - spender_sk, - 0, - 1049230, - conf.burnchain.chain_id, - "large-0", - &giant_contract, - ); - let parsed_tx = StacksTransaction::consensus_deserialize(&mut &tx[..]).unwrap(); - debug!("Mine transaction {} in a microblock", &parsed_tx.txid()); - tx - } - }) - .collect(); + submit_tx(&http_origin, &vote_tx); + submit_tx(&http_origin, &call_le_tx); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 5000; - conf.node.microblock_frequency = 5000; - conf.miner.microblock_attempt_time_ms = 120_000; + // clear and mine another burnchain block, so that the new winner is seen by the observer + // (the observer is logically "one block behind" the miner + test_observer::clear(); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let mut blocks = test_observer::get_blocks(); + // should have produced 1 new block + assert_eq!(blocks.len(), 1); + let block = blocks.pop().unwrap(); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + let mut tested = false; + let mut exec_cost = ExecutionCost::ZERO; + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if contract_call.function_name.as_str() == "execute-2" { + exec_cost = + serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap(); + } else if contract_call.function_name.as_str() == "propose-vote-confirm" { + let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); + assert_eq!(parsed.to_string(), "(ok u0)"); + tested = true; + } + } + } + assert!(tested, "Should have found a contract call tx"); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); + // try to confirm the passed vote (this will fail) + let confirm_proposal = make_contract_call( + &spender_sk, + 5, + 1000, + conf.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "cost-voting", + "confirm-miners", + &[Value::UInt(0)], + ); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + submit_tx(&http_origin, &confirm_proposal); - btc_regtest_controller.bootstrap_chain(201); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - eprintln!("Chain bootstrapped..."); + // clear and mine another burnchain block, so that the new winner is seen by the observer + // (the observer is 
logically "one block behind" the miner + test_observer::clear(); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); + let mut blocks = test_observer::get_blocks(); + // should have produced 1 new block + assert_eq!(blocks.len(), 1); + let block = blocks.pop().unwrap(); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + let mut tested = false; + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if contract_call.function_name.as_str() == "confirm-miners" { + let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); + assert_eq!(parsed.to_string(), "(err 13)"); + tested = true; + } + } + } + assert!(tested, "Should have found a contract call tx"); - let channel = run_loop.get_coordinator_channel().unwrap(); + for _i in 0..58 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } - thread::spawn(move || run_loop.start(None, 0)); + // confirm the passed vote + let confirm_proposal = make_contract_call( + &spender_sk, + 6, + 1000, + conf.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "cost-voting", + "confirm-miners", + &[Value::UInt(0)], + ); - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); + submit_tx(&http_origin, &confirm_proposal); - // first block wakes up the run loop next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration + // clear and mine another burnchain block, so that the new winner is seen by the observer + // (the observer is logically "one block behind" the miner + test_observer::clear(); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let mut blocks = test_observer::get_blocks(); + // should have produced 1 new block + assert_eq!(blocks.len(), 1); + let block = blocks.pop().unwrap(); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + let mut tested = false; + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if contract_call.function_name.as_str() == "confirm-miners" { + let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); + assert_eq!(parsed.to_string(), "(ok true)"); + tested = true; + } + } + } + assert!(tested, "Should have found a contract call tx"); - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - // and our potential spenders: + let call_le_tx = make_contract_call( + &spender_sk, + 7, + 1000, + conf.burnchain.chain_id, + &spender_addr, + "caller", + "execute-2", + &[Value::UInt(1)], + ); - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 1049230); - } + submit_tx(&http_origin, &call_le_tx); - for tx in txs.iter() { - // okay, let's push a bunch of transactions that can only fit one per block! - submit_tx(&http_origin, tx); - } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // clear and mine another burnchain block, so that the new winner is seen by the observer + // (the observer is logically "one block behind" the miner + test_observer::clear(); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut micro_block_txs = 0; - let mut anchor_block_txs = 0; + let mut blocks = test_observer::get_blocks(); + // should have produced 1 new block + assert_eq!(blocks.len(), 1); + let block = blocks.pop().unwrap(); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for i in 0..100 { - // now let's mine a couple blocks, and then check the sender's nonce. - // at the end of mining three blocks, there should be _at least one_ transaction from the microblock - // only set that got mined (since the block before this one was empty, a microblock can - // be added), - // and a number of transactions from equal to the number anchor blocks will get mined. - // - // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block. 
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // this one will contain the sortition from above anchor block, - // which *should* have also confirmed the microblock. - sleep_ms(10_000 * i); - - micro_block_txs = 0; - anchor_block_txs = 0; - - // let's figure out how many micro-only and anchor-only txs got accepted - // by examining our account nonces: - for (ix, spender_addr) in spender_addrs.iter().enumerate() { - let res = get_account(&http_origin, &spender_addr); - if res.nonce == 1 { - if ix % 2 == 0 { - anchor_block_txs += 1; - } else { - micro_block_txs += 1; - } - } else if res.nonce != 0 { - panic!("Spender address nonce incremented past 1"); - } - - debug!("Spender {ix},{spender_addr}: {res:?}"); + let mut tested = false; + let mut new_exec_cost = ExecutionCost::max_value(); + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; } - - eprintln!("anchor_block_txs: {anchor_block_txs}, micro_block_txs: {micro_block_txs}"); - - if anchor_block_txs >= 2 && micro_block_txs >= 2 { - break; + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if contract_call.function_name.as_str() == "execute-2" { + new_exec_cost = + serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap(); + tested = true; + } } } + assert!(tested, "Should have found a contract call tx"); - assert!(anchor_block_txs >= 2); - assert!(micro_block_txs >= 2); + assert!(exec_cost.exceeds(&new_exec_cost)); test_observer::clear(); channel.stop_chains_coordinator(); } -// if a microblock consumes the majority of the block budget, then _only_ a microblock will be -// mined for an epoch. 
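// The observer checks above repeat one decoding pattern: pull each `raw_tx`
// hex string out of a block event, deserialize it into a `StacksTransaction`,
// and match on contract-call payloads. A minimal sketch of that pattern as a
// helper, using only items this file already imports (`hex_bytes`,
// `StacksTransaction`, `TransactionPayload`, `Value`); `contract_call_results`
// itself is hypothetical.
fn contract_call_results(block: &serde_json::Value, function: &str) -> Vec<String> {
    let mut results = vec![];
    for tx in block.get("transactions").unwrap().as_array().unwrap() {
        let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap();
        if raw_tx == "0x00" {
            // placeholder entry that carries no Stacks transaction payload
            continue;
        }
        let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
        let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
        if let TransactionPayload::ContractCall(cc) = parsed.payload {
            if cc.function_name.as_str() == function {
                let raw_result = tx.get("raw_result").unwrap().as_str().unwrap();
                results.push(
                    Value::try_deserialize_hex_untyped(&raw_result[2..])
                        .unwrap()
                        .to_string(),
                );
            }
        }
    }
    results
}
// Under this sketch, the final `confirm-miners` scan above would reduce to
// asserting that contract_call_results(&block, "confirm-miners") yields ["(ok true)"].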
#[test] #[ignore] -fn size_overflow_unconfirmed_microblocks_integration_test() { +fn mining_events_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - // stuff a gigantic contract into the anchored block - let mut giant_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..(1024 * 1024 + 500) { - giant_contract.push(' '); - } - - // small-sized contracts for microblocks - let mut small_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..(1024 * 1024 + 500) { - small_contract.push(' '); - } + let small_contract = "(define-public (f) (ok 1))".to_string(); - let spender_sks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let addr = to_addr(&spender_sk); - let (mut conf, miner_account) = neon_integration_test_conf(); + let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); + let addr_2 = to_addr(&spender_sk_2); - let txs: Vec> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - if ix % 2 == 0 { - // almost fills a whole block - vec![make_contract_publish( - spender_sk, - 0, - 1100000, - conf.burnchain.chain_id, - "large-0", - &giant_contract, - )] - } else { - let mut ret = vec![]; - for i in 0..25 { - let tx = make_contract_publish_microblock_only( - spender_sk, - i as u64, - 1100000, - conf.burnchain.chain_id, - &format!("small-{i}"), - &small_contract, - ); - ret.push(tx); - } - ret - } - }) - .collect(); + let (mut conf, _) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 10492300000, - }); - } + conf.initial_balances.push(InitialBalance { + address: addr.into(), + amount: 10000000, + }); + conf.initial_balances.push(InitialBalance { + address: addr_2.into(), + amount: 10000000, + }); conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 5_000; - conf.node.microblock_frequency = 5_000; - conf.miner.microblock_attempt_time_ms = 120_000; + conf.node.wait_time_for_microblocks = 1000; + conf.node.microblock_frequency = 1000; conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let tx = make_contract_publish( + &spender_sk, + 0, + 600000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 610000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + let mb_tx = make_contract_publish_microblock_only( + &spender_sk_2, + 0, + 620000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + test_observer::spawn(); - test_observer::register_any(&mut conf); + test_observer::register( + &mut conf, + &[ + EventKeyType::AnyEvent, + EventKeyType::MinedBlocks, + EventKeyType::MinedMicroblocks, + ], + ); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -4726,7 +4302,6 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { let mut run_loop = neon::RunLoop::new(conf); let blocks_processed = run_loop.get_blocks_processed_arc(); - let microblocks_processed = run_loop.get_microblocks_processed_arc(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -4744,158 +4319,252 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, 
&blocks_processed); - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - // and our potential spenders: - - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 10492300000); - } - - for tx_batch in txs.iter() { - for tx in tx_batch.iter() { - // okay, let's push a bunch of transactions that can only fit one per block! - submit_tx(&http_origin, tx); - } - } - - while wait_for_microblocks(&microblocks_processed, 120) { - info!("Waiting for microblocks to no longer be processed"); - } + submit_tx(&http_origin, &tx); // should succeed + submit_tx(&http_origin, &tx_2); // should fail since it tries to publish a contract with the same name + submit_tx(&http_origin, &mb_tx); // should be in a microblock because it is microblock-only - // now let's mine a couple blocks, and then check the sender's nonce. - // at the end of mining three blocks, there should be _two_ transactions from the microblock - // only set that got mined (since the block before this one was empty, a microblock can - // be added), - // and _two_ transactions from the two anchor blocks that got mined (and processed) - // - // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // this one will contain the sortition from above anchor block, - // which *should* have also confirmed the microblock. - - while wait_for_microblocks(&microblocks_processed, 120) { - info!("Waiting for microblocks to no longer be processed"); - } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(30_000); - - let blocks = test_observer::get_blocks(); - assert_eq!(blocks.len(), 4); // genesis block + 3 blocks + // check that the nonces have gone up + let res = get_account(&http_origin, &addr); + assert_eq!(res.nonce, 1); - let mut max_big_txs_per_block = 0; - let mut max_big_txs_per_microblock = 0; - let mut total_big_txs_per_block = 0; - let mut total_big_txs_per_microblock = 0; + let res = get_account(&http_origin, &addr_2); + assert_eq!(res.nonce, 1); - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); + // check mined microblock events + let mined_microblock_events = test_observer::get_mined_microblocks(); + assert!(!mined_microblock_events.is_empty()); - let mut num_big_anchored_txs = 0; - let mut num_big_microblock_txs = 0; + // check tx events in the first microblock + // 1 success: 1 contract publish; the 2 on-chain transactions are not included here + let microblock_tx_events = &mined_microblock_events[0].tx_events; + assert_eq!(microblock_tx_events.len(), 1); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(tsc, ..)
= parsed.payload { - if tsc.name.to_string().contains("large-") { - num_big_anchored_txs += 1; - total_big_txs_per_block += 1; - } else if tsc.name.to_string().contains("small") { - num_big_microblock_txs += 1; - total_big_txs_per_microblock += 1; + // contract publish + match &microblock_tx_events[0] { + TransactionEvent::Success(TransactionSuccessEvent { + result, + fee, + execution_cost, + .. + }) => { + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); + assert_eq!(fee, &620000); + assert_eq!( + execution_cost, + &ExecutionCost { + write_length: 35, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 311000 } - } - } - - if num_big_anchored_txs > max_big_txs_per_block { - max_big_txs_per_block = num_big_anchored_txs; - } - if num_big_microblock_txs > max_big_txs_per_microblock { - max_big_txs_per_microblock = num_big_microblock_txs; + ) } + _ => panic!("unexpected event type"), } - eprintln!( - "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}, total_big_txs_per_block: {total_big_txs_per_block}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" - ); - - assert!(max_big_txs_per_block > 0); - assert!(max_big_txs_per_microblock > 0); - assert!(total_big_txs_per_block > 0); - assert!(total_big_txs_per_microblock > 0); - - // can't have too many - assert!(max_big_txs_per_microblock <= 3); - assert!(max_big_txs_per_block <= 1); - - // NOTE: last-mined blocks aren't counted by the observer - assert!(total_big_txs_per_block <= 2); - assert!(total_big_txs_per_microblock <= 3); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -// mine a stream of microblocks, and verify that the miner won't let us overflow the size -#[test] -#[ignore] -fn size_overflow_unconfirmed_stream_microblocks_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let mut small_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..((1024 * 1024 + 500) / 3) { - small_contract.push(' '); - } - - let spender_sks: Vec<_> = (0..20).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + // check mined block events + let mined_block_events = test_observer::get_mined_blocks(); + assert!(mined_block_events.len() >= 3); - let (mut conf, miner_account) = neon_integration_test_conf(); + // check the tx events in the third mined block + // 2 success: 1 coinbase tx event + 1 contract publish, 1 error (duplicate contract) + let third_block_tx_events = &mined_block_events[2].tx_events; + assert_eq!(third_block_tx_events.len(), 3); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 10492300000, - }); + // coinbase event + match &third_block_tx_events[0] { + TransactionEvent::Success(TransactionSuccessEvent { txid, result, ..
}) => { + assert_eq!( + txid.to_string(), + "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6" + ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); + } + _ => panic!("unexpected event type"), } - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 1000; - conf.node.microblock_frequency = 1000; - conf.miner.microblock_attempt_time_ms = 120_000; - conf.node.max_microblocks = 65536; - conf.burnchain.max_rbf = 1000000; + // contract publish event + match &third_block_tx_events[1] { + TransactionEvent::Success(TransactionSuccessEvent { + result, + fee, + execution_cost, + .. + }) => { + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); + assert_eq!(fee, &600000); + assert_eq!( + execution_cost, + &ExecutionCost { + write_length: 35, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 311000 + } + ) + } + _ => panic!("unexpected event type"), + } + + // dupe contract error event + match &third_block_tx_events[2] { + TransactionEvent::ProcessingError(TransactionErrorEvent { txid: _, error }) => { + assert_eq!( + error, + "Duplicate contract 'ST3WM51TCWMJYGZS1QFMC28DH5YP86782YGR113C1.small'" + ); + } + _ => panic!("unexpected event type"), + } + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + +/// This test checks that the limit behavior in the miner works as expected for anchored block +/// building. When we first hit the block limit, the limit behavior switches to +/// `CONTRACT_LIMIT_HIT`, during which stx transfers are still allowed, and contract-related +/// transactions are skipped. +/// Note: the test is sensitive to the order in which transactions are mined; it is written +/// expecting that transactions are traversed in the order tx_1, tx_2, tx_3, and tx_4. +#[test] +#[ignore] +fn block_limit_hit_integration_test() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + // 700 invocations + let max_contract_src = format!( + "(define-private (work) (begin {} 1)) + (define-private (times-100) (begin {} 1)) + (define-private (times-200) (begin (times-100) (times-100) 1)) + (define-private (times-500) (begin (times-200) (times-200) (times-100) 1)) + (times-500) (times-200)", + (0..10) + .map(|_| format!( + "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)", + boot_code_id("cost-voting", false), + boot_code_id("costs", false), + boot_code_id("costs", false), + )) + .collect::<Vec<String>>() + .join(" "), + (0..10) + .map(|_| "(work)".to_string()) + .collect::<Vec<String>>() + .join(" "), + ); + + // 2900 invocations + let oversize_contract_src = format!( + "(define-private (work) (begin {} 1)) + (define-private (times-100) (begin {} 1)) + (define-private (times-200) (begin (times-100) (times-100) 1)) + (define-private (times-500) (begin (times-200) (times-200) (times-100) 1)) + (define-private (times-1000) (begin (times-500) (times-500) 1)) + (times-1000) (times-1000) (times-500) (times-200) (times-200)", + (0..10) + .map(|_| format!( + "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)", + boot_code_id("cost-voting", false), + boot_code_id("costs", false), + boot_code_id("costs", false), + )) + .collect::<Vec<String>>() + .join(" "), + (0..10) + .map(|_| "(work)".to_string()) + .collect::<Vec<String>>() + .join(" "), + ); + + let spender_sk = StacksPrivateKey::random(); + let addr = to_addr(&spender_sk); + let second_spender_sk = StacksPrivateKey::random(); + let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into(); + let third_spender_sk = StacksPrivateKey::random(); + let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); + + let (mut conf, _miner_account) = neon_integration_test_conf(); + + conf.initial_balances.push(InitialBalance { + address: addr.into(), + amount: 10_000_000, + }); + conf.initial_balances.push(InitialBalance { + address: second_spender_addr.clone(), + amount: 10_000_000, + }); + conf.initial_balances.push(InitialBalance { + address: third_spender_addr.clone(), + amount: 10_000_000, + }); + + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 30000; + conf.node.microblock_frequency = 1000; conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - let txs: Vec<_> = spender_sks - .iter() - .map(|spender_sk| { - make_contract_publish_microblock_only( - spender_sk, - 0, - 600000, - conf.burnchain.chain_id, - "small", - &small_contract, - ) - }) - .collect(); + // included in first block + let tx = make_contract_publish( + &spender_sk, + 0, + 555_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + // contract limit hit; included in second block + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 555_000, + conf.burnchain.chain_id, + "over-2", + &oversize_contract_src, + ); + // skipped over since contract limit was hit; included in second block + let tx_3 = make_contract_publish( + &second_spender_sk, + 0, + 150_000, + conf.burnchain.chain_id, + "max", + &max_contract_src, + ); + // included in first block + let tx_4 = make_stacks_transfer( + &third_spender_sk, + 0, + 180, + conf.burnchain.chain_id, + &PrincipalData::from(addr), + 100, + ); test_observer::spawn(); test_observer::register_any(&mut conf); @@ -4914,7 +4583,6 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { let mut run_loop = neon::RunLoop::new(conf); let blocks_processed = run_loop.get_blocks_processed_arc(); - let microblocks_processed = run_loop.get_microblocks_processed_arc(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -4932,159 +4600,134 @@ // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 10492300000); - } - - let mut ctr = 0; - while ctr < txs.len() { - submit_tx(&http_origin, &txs[ctr]); - if !wait_for_microblocks(&microblocks_processed, 60) { - // we time out if we *can't* mine any more microblocks - break; - } - ctr += 1; - } + // submit all the transactions + let txid_1 = submit_tx(&http_origin, &tx); + let txid_2 = submit_tx(&http_origin, &tx_2); + let txid_3 = submit_tx(&http_origin, &tx_3); + let
txid_4 = submit_tx(&http_origin, &tx_4); - // should be able to fit 5 transactions in, in 5 microblocks - assert_eq!(ctr, 5); sleep_ms(5_000); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(20_000); - eprintln!("First confirmed microblock stream!"); - - microblocks_processed.store(0, Ordering::SeqCst); - - while ctr < txs.len() { - submit_tx(&http_origin, &txs[ctr]); - ctr += 1; - } - wait_for_microblocks(µblocks_processed, 60); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - eprintln!("Second confirmed microblock stream!"); - - wait_for_microblocks(µblocks_processed, 60); - - // confirm it - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(20_000); - // this test can sometimes miss a mine block event. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(20_000); - let blocks = test_observer::get_blocks(); - assert!(blocks.len() >= 5, "Should have produced at least 5 blocks"); - - let mut max_big_txs_per_microblock = 0; - let mut total_big_txs_per_microblock = 0; + let res = get_account(&http_origin, &addr); + assert_eq!(res.nonce, 2); - // NOTE: this only counts the number of txs per stream, not in each microblock - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); + let res = get_account(&http_origin, &second_spender_addr); + assert_eq!(res.nonce, 1); - let mut num_big_microblock_txs = 0; + let res = get_account(&http_origin, &third_spender_addr); + assert_eq!(res.nonce, 1); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload { - if tsc.name.to_string().contains("small") { - num_big_microblock_txs += 1; - total_big_txs_per_microblock += 1; - } - } - } - if num_big_microblock_txs > max_big_txs_per_microblock { - max_big_txs_per_microblock = num_big_microblock_txs; - } - } + let mined_block_events = test_observer::get_blocks(); + assert_eq!(mined_block_events.len(), 5); - eprintln!( - "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" - ); + let tx_third_block = mined_block_events[3] + .get("transactions") + .unwrap() + .as_array() + .unwrap(); + assert_eq!(tx_third_block.len(), 3); + let txid_1_exp = tx_third_block[1].get("txid").unwrap().as_str().unwrap(); + let txid_4_exp = tx_third_block[2].get("txid").unwrap().as_str().unwrap(); + assert_eq!(format!("0x{txid_1}"), txid_1_exp); + assert_eq!(format!("0x{txid_4}"), txid_4_exp); - assert_eq!(max_big_txs_per_microblock, 5); - assert!(total_big_txs_per_microblock >= 10); + let tx_fourth_block = mined_block_events[4] + .get("transactions") + .unwrap() + .as_array() + .unwrap(); + assert_eq!(tx_fourth_block.len(), 3); + let txid_2_exp = tx_fourth_block[1].get("txid").unwrap().as_str().unwrap(); + let txid_3_exp = tx_fourth_block[2].get("txid").unwrap().as_str().unwrap(); + assert_eq!(format!("0x{txid_2}"), txid_2_exp); + assert_eq!(format!("0x{txid_3}"), txid_3_exp); test_observer::clear(); channel.stop_chains_coordinator(); } -// Mine a too-long microblock stream, and verify that the anchored block miner truncates it down to -// the longest prefix of the stream that can be mined. #[test] #[ignore] -fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { +fn block_large_tx_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - // create microblock streams that are too big - env::set_var(core::FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK, "1"); - env::set_var(core::FAULT_DISABLE_MICROBLOCKS_COST_CHECK, "1"); - - let mut small_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..((1024 * 1024 + 500) / 8) { - small_contract.push(' '); - } - - let spender_sks: Vec<_> = (0..25).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - + let small_contract_src = format!( + "(define-public (f) (begin {} (ok 1))) (begin (f))", + (0..700) + .map(|_| format!( + "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", + boot_code_id("cost-voting", false), + boot_code_id("costs", false), + boot_code_id("costs", false), + )) + .collect::<Vec<String>>() + .join(" ") + ); + + let oversize_contract_src = format!( + "(define-public (f) (begin {} (ok 1))) (begin (f))", + (0..3500) + .map(|_| format!( + "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", + boot_code_id("cost-voting", false), + boot_code_id("costs", false), + boot_code_id("costs", false), + )) + .collect::<Vec<String>>() + .join(" ") + ); + + let spender_sk = StacksPrivateKey::random(); + let spender_addr = to_addr(&spender_sk); + let (mut conf, miner_account) = neon_integration_test_conf(); + + test_observer::spawn(); + test_observer::register_any(&mut conf); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 10492300000, - }); - } + conf.initial_balances.push(InitialBalance { + address: spender_addr.into(), + amount: 10000000, + }); conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 5_000; - conf.node.microblock_frequency = 1_000; - conf.miner.microblock_attempt_time_ms = 120_000; - conf.node.max_microblocks = 65536; - conf.burnchain.max_rbf = 1000000; - - let txs: Vec<Vec<u8>> = spender_sks - .iter() - .map(|spender_sk| { - make_contract_publish_microblock_only( - spender_sk, - 0, - 1149230, - conf.burnchain.chain_id, - "small", - &small_contract, - ) - }) - .collect(); + conf.node.wait_time_for_microblocks = 30000; + conf.node.microblock_frequency = 1000; - let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch20].block_limit = core::BLOCK_LIMIT_MAINNET_20; - conf.burnchain.epochs = Some(epochs); + conf.miner.microblock_attempt_time_ms = i64::MAX as u64; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - test_observer::spawn(); - test_observer::register_any(&mut conf); + // higher fee for tx means it will get mined first + let tx = make_contract_publish( + &spender_sk, + 0, + 671_000, + conf.burnchain.chain_id, + "small", + &small_contract_src, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 670_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5100,7 +4743,6 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let mut run_loop = neon::RunLoop::new(conf); let blocks_processed = run_loop.get_blocks_processed_arc(); - let microblocks_processed = run_loop.get_microblocks_processed_arc(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -5118,75 +4760,26 @@ // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // let's query the miner's account nonce: let account = get_account(&http_origin, &miner_account); assert_eq!(account.nonce, 1); assert_eq!(account.balance, 0); - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 10492300000); - } - - let mut ctr = 0; - for _i in 0..6 { - submit_tx(&http_origin, &txs[ctr]); - if !wait_for_microblocks(&microblocks_processed, 60) { - break; - } - ctr += 1; - } - - // confirm that we were able to use the fault-injection to *mine* 6 microblocks - assert_eq!(ctr, 6); - sleep_ms(5_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - eprintln!("First confirmed microblock stream!"); - - // confirm it - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - -
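// Both `block_limit_hit_integration_test` and `block_large_tx_integration_test`
// size their contracts the same way: repeat one costly sub-expression N times so
// execution cost grows linearly with N. A minimal sketch of that construction,
// reusing `boot_code_id` exactly as above; `repeated_proposal_body` is a
// hypothetical helper, and the N needed to cross the block limit depends on the
// epoch's cost limits.
fn repeated_proposal_body(repetitions: usize) -> String {
    let call = format!(
        "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))",
        boot_code_id("cost-voting", false),
        boot_code_id("costs", false),
        boot_code_id("costs", false),
    );
    let body = std::iter::repeat(call)
        .take(repetitions)
        .collect::<Vec<String>>()
        .join(" ");
    // wrap the repeated calls in a public function and invoke it at publish time
    format!("(define-public (f) (begin {body} (ok 1))) (begin (f))")
}
// e.g. block_large_tx_integration_test's two sources correspond to
// repeated_proposal_body(700) and repeated_proposal_body(3500).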
let blocks = test_observer::get_blocks(); - assert_eq!(blocks.len(), 4); // genesis block + 3 blocks - - let mut max_big_txs_per_microblock = 0; - let mut total_big_txs_per_microblock = 0; - - // NOTE: this only counts the number of txs per stream, not in each microblock - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.nonce, 0); + assert_eq!(account.balance, 10000000); - let mut num_big_microblock_txs = 0; + let normal_txid = submit_tx(&http_origin, &tx); + let huge_txid = submit_tx(&http_origin, &tx_2); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().contains("small") { - num_big_microblock_txs += 1; - total_big_txs_per_microblock += 1; - } - } - } - if num_big_microblock_txs > max_big_txs_per_microblock { - max_big_txs_per_microblock = num_big_microblock_txs; - } - } + eprintln!("Try to mine a too-big tx. Normal = {normal_txid}, TooBig = {huge_txid}"); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 1200); - eprintln!( - "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" - ); + eprintln!("Finished trying to mine a too-big tx"); - assert_eq!(max_big_txs_per_microblock, 3); - assert!(total_big_txs_per_microblock <= 6); + let dropped_txs = test_observer::get_memtx_drops(); + assert_eq!(dropped_txs.len(), 1); + assert_eq!(&dropped_txs[0].1, "TooExpensive"); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -5194,175 +4787,113 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { #[test] #[ignore] -fn runtime_overflow_unconfirmed_microblocks_integration_test() { +fn pox_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let spender_sks: Vec<_> = (0..4).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let spender_addrs_c32: Vec = spender_sks.iter().map(to_addr).collect(); + let spender_sk = StacksPrivateKey::random(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let (mut conf, miner_account) = neon_integration_test_conf(); + let spender_2_sk = StacksPrivateKey::random(); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } + let spender_3_sk = StacksPrivateKey::random(); + let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 15000; - conf.miner.microblock_attempt_time_ms = 120_000; + let pox_pubkey = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); - conf.miner.first_attempt_time_ms = i64::MAX as u64; - 
conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::random()); + let pox_2_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_2_pubkey).to_bytes()); - let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch20].block_limit = core::BLOCK_LIMIT_MAINNET_20; - conf.burnchain.epochs = Some(epochs); + let pox_2_address = BitcoinAddress::from_bytes_legacy( + BitcoinNetworkType::Testnet, + LegacyBitcoinAddressType::PublicKeyHash, + &Hash160::from_node_public_key(&pox_2_pubkey).to_bytes(), + ) + .unwrap(); - let txs: Vec> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - if ix % 2 == 0 { - // almost fills a whole block - vec![make_contract_publish( - spender_sk, - 0, - 1049230, - conf.burnchain.chain_id, - &format!("large-{ix}"), - &format!(" - ;; a single one of these transactions consumes over half the runtime budget - (define-constant BUFF_TO_BYTE (list - 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f - 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f - 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f - 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f - 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f - 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f - 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f - 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f - 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f - 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f - 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf - 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf - 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf - 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf - 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef - 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff - )) - (define-private (crash-me-folder (input (buff 1)) (ctr uint)) - (begin - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (+ u1 ctr) - ) - ) - (define-public (crash-me (name (string-ascii 128))) - (begin - (fold crash-me-folder BUFF_TO_BYTE u0) - (print name) - (ok u0) - ) - ) - (begin - (crash-me \"large-contract-{}-{ix}\")) - ", - &spender_addrs_c32[ix] - ) - )] - } else { - let mut ret = vec![]; - for i in 0..1 { - let tx = make_contract_publish_microblock_only( - spender_sk, - i as u64, - 210000, - conf.burnchain.chain_id, - &format!("small-{ix}-{i}"), - &format!(" - ;; a single one of these transactions consumes over half the runtime budget - (define-constant BUFF_TO_BYTE (list - 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f - 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f - 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 
0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f - 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f - 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f - 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f - 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f - 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f - 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f - 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f - 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf - 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf - 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf - 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf - 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef - 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff - )) - (define-private (crash-me-folder (input (buff 1)) (ctr uint)) - (begin - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (+ u1 ctr) - ) - ) - (define-public (crash-me (name (string-ascii 128))) - (begin - (fold crash-me-folder BUFF_TO_BYTE u0) - (print name) - (ok u0) - ) - ) - (begin - (crash-me \"small-contract-{}-{ix}-{i}\")) - ", spender_addrs_c32[ix]) - ); - ret.push(tx); - } - ret - } - }) - .collect(); + let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); test_observer::register_any(&mut conf); + // required for testing post-sunset behavior + conf.node.always_use_affirmation_maps = false; + + let first_bal = 6_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); + let second_bal = 2_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); + let third_bal = 2_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); + let stacked_bal = 1_000_000_000 * u128::from(core::MICROSTACKS_PER_STACKS); + + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: first_bal, + }); + + conf.initial_balances.push(InitialBalance { + address: spender_2_addr, + amount: second_bal, + }); + + conf.initial_balances.push(InitialBalance { + address: spender_3_addr, + amount: third_bal, + }); + + conf.miner.microblock_attempt_time_ms = 1_000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + // reward cycle length = 15, so 10 reward cycle slots + 5 prepare-phase burns + let reward_cycle_len = 15; + let prepare_phase_len = 5; + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + (16 * reward_cycle_len - 1).into(), + (17 * reward_cycle_len).into(), + 
u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); let http_origin = format!("http://{}", &conf.node.rpc_bind); btc_regtest_controller.bootstrap_chain(201); + let burnchain = burnchain_config.clone(); eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf); + let mut run_loop = neon::RunLoop::new(conf.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || run_loop.start(None, 0)); + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); // give the run loop some time to start up! wait_for_runloop(&blocks_processed); @@ -5376,64 +4907,134 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let sort_height = channel.get_sortitions_processed(); + // let's query the miner's account nonce: let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); assert_eq!(account.balance, 0); - // and our potential spenders: - - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 1049230); - } - - for tx_batch in txs.iter() { - for tx in tx_batch.iter() { - // okay, let's push a bunch of transactions that can only fit one per block! - submit_tx(&http_origin, tx); - } - } - - debug!("Wait for 1st microblock to be mined"); - sleep_ms(150_000); + assert_eq!(account.nonce, 1); - // now let's mine a couple blocks, and then check the sender's nonce. - // at the end of mining three blocks, there should be _two_ transactions from the microblock - // only set that got mined (since the block before this one was empty, a microblock can - // be added), - // and _two_ transactions from the two anchor blocks that got mined (and processed) - // - // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // this one will contain the sortition from above anchor block, - // which *should* have also confirmed the microblock. 
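
Aside on the constants constructed above: the PoxConstants passed to the regtest burnchain fix the burn-height geometry that the assertions in the rest of this test lean on. A standalone, illustrative sketch of that arithmetic follows (the closure names are mine, not the test's; it assumes first_burnchain_block_height = 0, which the test asserts below):

fn main() {
    let reward_cycle_len: u64 = 15; // 10 reward-phase blocks + 5 prepare-phase blocks
    let prepare_phase_len: u64 = 5;

    // Reward cycle n spans burn heights [n * 15, (n + 1) * 15); its prepare
    // phase occupies the 5 burn blocks immediately before it.
    let cycle_start = |n: u64| n * reward_cycle_len;
    let prepare_start = |n: u64| cycle_start(n) - prepare_phase_len;

    // These are the next_cycle values asserted right after bootstrap:
    assert_eq!(cycle_start(14), 210); // next_cycle.reward_phase_start_block_height
    assert_eq!(prepare_start(14), 205); // next_cycle.prepare_phase_start_block_height

    // The sunset window handed to PoxConstants::new above:
    assert_eq!(16 * reward_cycle_len - 1, 239); // sunset start
    assert_eq!(17 * reward_cycle_len, 255); // sunset end
}
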
+ // and our potential spenders: + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.balance, first_bal as u128); + assert_eq!(account.nonce, 0); - debug!("Wait for 2nd microblock to be mined"); - sleep_ms(150_000); + let pox_info = get_pox_info(&http_origin).unwrap(); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert_eq!(pox_info.first_burnchain_block_height, 0); + assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.stacked_ustx, 0); + assert!(!pox_info.current_cycle.is_pox_active); + assert_eq!(pox_info.next_cycle.stacked_ustx, 0); + assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); + assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 210); + assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 205); + assert_eq!(pox_info.next_cycle.min_increment_ustx, 1250710410920); + assert_eq!( + pox_info.prepare_cycle_length as u32, + pox_constants.prepare_length + ); + assert_eq!( + pox_info.rejection_fraction, + Some(pox_constants.pox_rejection_fraction) + ); + let reward_cycle = burnchain + .block_height_to_reward_cycle(sort_height) + .expect("Expected to be able to get reward cycle"); + assert_eq!(pox_info.reward_cycle_id, reward_cycle); + assert_eq!(pox_info.current_cycle.id, reward_cycle); + assert_eq!(pox_info.next_cycle.id, reward_cycle + 1); + assert_eq!( + pox_info.reward_cycle_length as u32, + pox_constants.reward_cycle_length + ); + assert_eq!(pox_info.total_liquid_supply_ustx, 10005683287360023); + assert_eq!(pox_info.next_reward_cycle_in, 6); - debug!("Wait for 3nd microblock to be mined"); - sleep_ms(150_000); + let tx = make_contract_call( + &spender_sk, + 0, + 260, + conf.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked_bal), + execute( + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), + ClarityVersion::Clarity1, + ) + .unwrap() + .unwrap(), + Value::UInt(sort_height as u128), + Value::UInt(6), + ], + ); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // okay, let's push that stacking transaction! + submit_tx(&http_origin, &tx); - let blocks = test_observer::get_blocks(); - assert_eq!(blocks.len(), 5); // genesis block + 4 blocks + let mut sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); + test_observer::clear(); + + // now let's mine until the next reward cycle starts ... 
+ while sort_height < ((14 * pox_constants.reward_cycle_length) + 1).into() { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); + } - let mut max_big_txs_per_block = 0; - let mut max_big_txs_per_microblock = 0; - let mut total_big_txs_in_blocks = 0; - let mut total_big_txs_in_microblocks = 0; + let pox_info = get_pox_info(&http_origin).unwrap(); + let reward_cycle = burnchain + .block_height_to_reward_cycle(sort_height) + .expect("Expected to be able to get reward cycle"); - for block in blocks { - eprintln!("block {block:?}"); - let transactions = block.get("transactions").unwrap().as_array().unwrap(); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert_eq!(pox_info.first_burnchain_block_height, 0); + assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); + assert!(pox_info.pox_activation_threshold_ustx > 1500000000000000); + assert!(!pox_info.current_cycle.is_pox_active); + assert_eq!(pox_info.next_cycle.stacked_ustx, 1000000000000000); + assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); + assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); + assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 220); + assert_eq!( + pox_info.prepare_cycle_length as u32, + pox_constants.prepare_length + ); + assert_eq!( + pox_info.rejection_fraction, + Some(pox_constants.pox_rejection_fraction) + ); + assert_eq!(pox_info.reward_cycle_id, reward_cycle); + assert_eq!(pox_info.current_cycle.id, reward_cycle); + assert_eq!(pox_info.next_cycle.id, reward_cycle + 1); + assert_eq!( + pox_info.reward_cycle_length as u32, + pox_constants.reward_cycle_length + ); + assert_eq!(pox_info.next_reward_cycle_in, 14); - let mut num_big_anchored_txs = 0; - let mut num_big_microblock_txs = 0; + let blocks_observed = test_observer::get_blocks(); + assert!( + blocks_observed.len() >= 2, + "Blocks observed {} should be >= 2", + blocks_observed.len() + ); + // look up the return value of our stacking operation... + let mut tested = false; + for block in blocks_observed.iter() { + if tested { + break; + } + let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -5441,874 +5042,1138 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - eprintln!("tx: {parsed:?}"); - if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload {
-                if tsc.name.to_string().contains("large-") {
-                    num_big_anchored_txs += 1;
-                    total_big_txs_in_blocks += 1;
-                } else if tsc.name.to_string().contains("small") {
-                    num_big_microblock_txs += 1;
-                    total_big_txs_in_microblocks += 1;
+            if let TransactionPayload::ContractCall(contract_call) = parsed.payload {
+                eprintln!("{}", contract_call.function_name.as_str());
+                if contract_call.function_name.as_str() == "stack-stx" {
+                    let raw_result = tx.get("raw_result").unwrap().as_str().unwrap();
+                    let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap();
+                    // should unlock at height 300 (we're in reward cycle 13, lockup starts in reward cycle
+                    // 14, and goes for 6 reward cycles, so we unlock in reward cycle 20, which with a reward
+                    // cycle length of 15 blocks, is a burnchain height of 300)
+                    assert_eq!(parsed.to_string(),
+                               format!("(ok (tuple (lock-amount u1000000000000000) (stacker {spender_addr}) (unlock-burn-height u300)))"));
+                    tested = true;
                }
            }
        }
-
-        if num_big_anchored_txs > max_big_txs_per_block {
-            max_big_txs_per_block = num_big_anchored_txs;
-        }
-        if num_big_microblock_txs > max_big_txs_per_microblock {
-            max_big_txs_per_microblock = num_big_microblock_txs;
-        }
    }

-    info!(
-        "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}"
-    );
-    info!(
-        "total_big_txs_in_microblocks: {total_big_txs_in_microblocks}, total_big_txs_in_blocks: {total_big_txs_in_blocks}"
+    assert!(tested, "Should have observed stack-stx transaction");
+
+    // let's stack with spender 2 and spender 3...
+
+    // now let's have sender_2 and sender_3 stack to pox spender_addr 2 in
+    // two different txs, and make sure that they sum together in the reward set.
+
+    let tx = make_contract_call(
+        &spender_2_sk,
+        0,
+        260,
+        conf.burnchain.chain_id,
+        &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
+        "pox",
+        "stack-stx",
+        &[
+            Value::UInt(stacked_bal / 2),
+            execute(
+                &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"),
+                ClarityVersion::Clarity1,
+            )
+            .unwrap()
+            .unwrap(),
+            Value::UInt(sort_height as u128),
+            Value::UInt(6),
+        ],
    );

-    // at most one big tx per block and at most one big tx per stream, always.
-    assert_eq!(max_big_txs_per_microblock, 1);
-    assert_eq!(max_big_txs_per_block, 1);
+    // okay, let's push that stacking transaction!
+    submit_tx(&http_origin, &tx);

-    // if the mblock stream has a big tx, the anchored block won't (and vice versa)
-    // the changes for miner cost tracking (reset tracker between microblock and block, #2913)
-    // altered this test so that one more big tx ends up in an anchored block and one fewer
-    // ends up in a microblock
-    assert_eq!(total_big_txs_in_blocks, 2);
-    assert_eq!(total_big_txs_in_microblocks, 1);
+    let tx = make_contract_call(
+        &spender_3_sk,
+        0,
+        260,
+        conf.burnchain.chain_id,
+        &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
+        "pox",
+        "stack-stx",
+        &[
+            Value::UInt(stacked_bal / 2),
+            execute(
+                &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"),
+                ClarityVersion::Clarity1,
+            )
+            .unwrap()
+            .unwrap(),
+            Value::UInt(sort_height as u128),
+            Value::UInt(6),
+        ],
+    );

-    test_observer::clear();
-    channel.stop_chains_coordinator();
-}
+    submit_tx(&http_origin, &tx);

-#[test]
-#[ignore]
-fn block_replay_integration_test() {
-    if env::var("BITCOIND_TEST") != Ok("1".into()) {
-        return;
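
The (unlock-burn-height u300) asserted above is plain reward-cycle arithmetic. A standalone, illustrative sketch (the variable names are mine), assuming the cycle length of 15 configured earlier:

fn main() {
    let reward_cycle_len: u128 = 15;
    // The stack-stx call lands in reward cycle 13, so the lock takes effect in cycle 14.
    let first_locked_cycle: u128 = 14;
    // The trailing Value::UInt(6) argument to stack-stx: a 6-reward-cycle lock period.
    let lock_period: u128 = 6;

    // Funds unlock at the first burn block of cycle 14 + 6 = 20.
    let unlock_burn_height = (first_locked_cycle + lock_period) * reward_cycle_len;
    assert_eq!(unlock_burn_height, 300); // matches (unlock-burn-height u300)
}

+    // mine until the end of the current reward cycle.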
+ sort_height = channel.get_sortitions_processed(); + while sort_height < ((15 * pox_constants.reward_cycle_length) - 1).into() { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); } - let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + let pox_info = get_pox_info(&http_origin).unwrap(); - let (mut conf, miner_account) = neon_integration_test_conf(); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert_eq!(pox_info.first_burnchain_block_height, 0); + assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); + assert!(!pox_info.current_cycle.is_pox_active); + assert_eq!(pox_info.next_cycle.stacked_ustx, 2000000000000000); + assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); + assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); + assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 220); + assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); + assert_eq!( + pox_info.prepare_cycle_length as u32, + pox_constants.prepare_length + ); + assert_eq!( + pox_info.rejection_fraction, + Some(pox_constants.pox_rejection_fraction) + ); + assert_eq!(pox_info.reward_cycle_id, 14); + assert_eq!(pox_info.current_cycle.id, 14); + assert_eq!(pox_info.next_cycle.id, 15); + assert_eq!( + pox_info.reward_cycle_length as u32, + pox_constants.reward_cycle_length + ); + assert_eq!(pox_info.next_reward_cycle_in, 1); - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 100300, - }); + // we should have received _no_ Bitcoin commitments, because the pox participation threshold + // was not met! + let utxos = btc_regtest_controller.get_all_utxos(&pox_pubkey); + eprintln!("Got UTXOs: {}", utxos.len()); + assert_eq!( + utxos.len(), + 0, + "Should have received no outputs during PoX reward cycle" + ); - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 30000; - conf.node.microblock_frequency = 5_000; + // let's test the reward information in the observer + test_observer::clear(); - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + // before sunset + // mine until the end of the next reward cycle, + // the participation threshold now should be met. 
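
Before the mining loop that follows, it is worth pinning down where the expected reward-slot counts come from. As the test's own comments describe, slots are awarded per whole multiple of the minimum stacking threshold, and contributions to the same PoX address are summed first. A standalone, illustrative sketch using the exact figures asserted in this test:

fn main() {
    let min_threshold_ustx: u128 = 125_080_000_000_000; // pox_info.current_cycle.min_threshold_ustx
    let full_stack: u128 = 1_000_000_000 * 1_000_000; // stacked_bal: 1e15 uSTX

    // pox_pubkey stacked the full amount in a single call: floor(1e15 / threshold) = 7 slots.
    assert_eq!(full_stack / min_threshold_ustx, 7);

    // Spenders 2 and 3 each stacked half toward pox_2_pubkey. Individually each
    // half is only worth 3 slots (6 total)...
    assert_eq!((full_stack / 2) / min_threshold_ustx, 3);
    // ...but because the reward set sums contributions to the same address,
    // pox_2_pubkey is credited for the combined amount: 7 slots, not 6.
    assert_eq!((full_stack / 2 + full_stack / 2) / min_threshold_ustx, 7);
}
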
+ while sort_height < ((16 * pox_constants.reward_cycle_length) - 1).into() { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); + } - test_observer::spawn(); - test_observer::register_any(&mut conf); + let pox_info = get_pox_info(&http_origin).unwrap(); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert_eq!(pox_info.first_burnchain_block_height, 0); + assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.stacked_ustx, 2000000000000000); + assert!(pox_info.current_cycle.is_pox_active); + assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 240); + assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 235); + assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); + assert_eq!(pox_info.next_reward_cycle_in, 1); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + // we should have received _seven_ Bitcoin commitments, because our commitment was 7 * threshold + let utxos = btc_regtest_controller.get_all_utxos(&pox_pubkey); - btc_regtest_controller.bootstrap_chain(201); + eprintln!("Got UTXOs: {}", utxos.len()); + assert_eq!( + utxos.len(), + 7, + "Should have received outputs during PoX reward cycle" + ); - eprintln!("Chain bootstrapped..."); + // we should have received _seven_ Bitcoin commitments to pox_2_pubkey, because our commitment was 7 * threshold + // note: that if the reward set "summing" isn't implemented, this recipient would only have received _6_ slots, + // because each `stack-stx` call only received enough to get 3 slot individually. + let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); + eprintln!("Got UTXOs: {}", utxos.len()); + assert_eq!( + utxos.len(), + 7, + "Should have received outputs during PoX reward cycle" + ); - let channel = run_loop.get_coordinator_channel().unwrap(); + let burn_blocks = test_observer::get_burn_blocks(); + let mut recipient_slots: HashMap = HashMap::new(); - thread::spawn(move || run_loop.start(None, 0)); + for block in burn_blocks.iter() { + let reward_slot_holders = block + .get("reward_slot_holders") + .unwrap() + .as_array() + .unwrap() + .iter() + .map(|x| x.as_str().unwrap().to_string()); + for holder in reward_slot_holders { + if let Some(current) = recipient_slots.get_mut(&holder) { + *current += 1; + } else { + recipient_slots.insert(holder, 1); + } + } + } - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); + let pox_1_address = BitcoinAddress::from_bytes_legacy( + BitcoinNetworkType::Testnet, + LegacyBitcoinAddressType::PublicKeyHash, + &Hash160::from_node_public_key(&pox_pubkey).to_bytes(), + ) + .unwrap(); - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + assert_eq!(recipient_slots.len(), 2); + assert_eq!( + recipient_slots.get(&format!("{pox_2_address}")).cloned(), + Some(7u64) + ); + assert_eq!( + recipient_slots.get(&format!("{pox_1_address}")).cloned(), + Some(7u64) + ); - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // get the canonical chain tip + let tip_info = get_chain_info(&conf); - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height); + assert_eq!(tip_info.stacks_tip_height, 36); - // let's query the miner's account nonce: + // now let's mine into the sunset + while sort_height < ((17 * pox_constants.reward_cycle_length) - 1).into() { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); + } - info!("Miner account: {miner_account}"); - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.balance, 0); - assert_eq!(account.nonce, 1); + // get the canonical chain tip + let tip_info = get_chain_info(&conf); - // and our spender - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.balance, 100300); - assert_eq!(account.nonce, 0); + eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height); + assert_eq!(tip_info.stacks_tip_height, 51); - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let tx = make_stacks_transfer( - &spender_sk, - 0, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ); - submit_tx(&http_origin, &tx); + let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // should receive more rewards during this cycle... + eprintln!("Got UTXOs: {}", utxos.len()); + assert_eq!( + utxos.len(), + 14, + "Should have received more outputs during the sunsetting PoX reward cycle" + ); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // and after sunset + while sort_height < ((18 * pox_constants.reward_cycle_length) - 1).into() { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); + } - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); - // try and push the mined block back at the node lots of times - let (tip_consensus_hash, tip_block) = get_tip_anchored_block(&conf); - let mut tip_block_bytes = vec![]; - tip_block.consensus_serialize(&mut tip_block_bytes).unwrap(); + // should *not* receive more rewards during the after sunset cycle... 
+ eprintln!("Got UTXOs: {}", utxos.len()); + assert_eq!( + utxos.len(), + 14, + "Should have received no more outputs after sunset PoX reward cycle" + ); - for i in 0..1024 { - let path = format!("{http_origin}/v2/blocks/upload/{tip_consensus_hash}"); - let res_text = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tip_block_bytes.clone()) - .send() - .unwrap() - .text() - .unwrap(); + // should have progressed the chain, though! + // get the canonical chain tip + let tip_info = get_chain_info(&conf); - eprintln!("{i}: text of {path}\n{res_text}"); - } + eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height); + assert_eq!(tip_info.stacks_tip_height, 66); test_observer::clear(); channel.stop_chains_coordinator(); } +#[derive(Debug)] +enum Signal { + BootstrapNodeReady, + FollowerNodeReady, + ReplicatingAttachmentsStartTest1, + ReplicatingAttachmentsCheckTest1(u64), + ReplicatingAttachmentsStartTest2, + ReplicatingAttachmentsCheckTest2(u64), +} + #[test] #[ignore] -fn cost_voting_integration() { +fn atlas_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - // let's make `<` free... - let cost_definer_src = " - (define-read-only (cost-definition-le (size uint)) - { - runtime: u0, write_length: u0, write_count: u0, read_count: u0, read_length: u0 - }) - "; - - // the contract that we'll test the costs of - let caller_src = " - (define-public (execute-2 (a uint)) - (ok (< a a))) - "; + let user_1 = StacksPrivateKey::random(); + let initial_balance_user_1 = InitialBalance { + address: to_addr(&user_1).into(), + amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), + }; - let power_vote_src = " - (define-public (propose-vote-confirm) - (let - ((proposal-id (unwrap-panic (contract-call? 'ST000000000000000000002AMW42H.cost-voting submit-proposal - 'ST000000000000000000002AMW42H.costs \"cost_le\" - .cost-definer \"cost-definition-le\"))) - (vote-amount (* u9000000000 u1000000))) - (try! (contract-call? 'ST000000000000000000002AMW42H.cost-voting vote-proposal proposal-id vote-amount)) - (try! (contract-call? 
'ST000000000000000000002AMW42H.cost-voting confirm-votes proposal-id)) - (ok proposal-id))) - "; + // Prepare the config of the bootstrap node + let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); + let bootstrap_node_public_key = { + let keychain = Keychain::default(conf_bootstrap_node.node.seed.clone()); + let mut pk = keychain.generate_op_signer().get_public_key(); + pk.set_compressed(true); + pk.to_hex() + }; + conf_bootstrap_node + .initial_balances + .push(initial_balance_user_1.clone()); - let spender_sk = StacksPrivateKey::random(); - let spender_addr = to_addr(&spender_sk); - let spender_princ: PrincipalData = spender_addr.into(); + conf_bootstrap_node.node.always_use_affirmation_maps = false; - let (mut conf, miner_account) = neon_integration_test_conf(); + // Prepare the config of the follower node + let (mut conf_follower_node, _) = neon_integration_test_conf(); + let bootstrap_node_url = format!( + "{}@{}", + bootstrap_node_public_key, conf_bootstrap_node.node.p2p_bind + ); + conf_follower_node.node.set_bootstrap_nodes( + bootstrap_node_url, + conf_follower_node.burnchain.chain_id, + conf_follower_node.burnchain.peer_version, + ); + conf_follower_node.node.miner = false; + conf_follower_node + .initial_balances + .push(initial_balance_user_1.clone()); + conf_follower_node + .events_observers + .insert(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, + disable_retries: false, + }); - conf.miner.microblock_attempt_time_ms = 1_000; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 1_000; - conf.miner.first_attempt_time_ms = 2_000; - conf.miner.subsequent_attempt_time_ms = 5_000; - conf.burnchain.max_rbf = 10_000_000; - conf.node.wait_time_for_blocks = 1_000; + conf_follower_node.node.always_use_affirmation_maps = false; - test_observer::spawn(); - test_observer::register_any(&mut conf); + // Our 2 nodes will share the bitcoind node + let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); - let spender_bal = 10_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); + let (bootstrap_node_tx, bootstrap_node_rx) = mpsc::channel(); + let (follower_node_tx, follower_node_rx) = mpsc::channel(); - conf.initial_balances.push(InitialBalance { - address: spender_princ.clone(), - amount: spender_bal, - }); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); + let bootstrap_node_thread = thread::spawn(move || { + let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); - let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf_bootstrap_node.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf_bootstrap_node.node.rpc_bind); - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + btc_regtest_controller.bootstrap_chain(201); - btc_regtest_controller.bootstrap_chain(201); + eprintln!("Chain bootstrapped..."); - eprintln!("Chain bootstrapped..."); + let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); + 
let blocks_processed = run_loop.get_blocks_processed_arc();
+        let client = reqwest::blocking::Client::new();
+        let channel = run_loop.get_coordinator_channel().unwrap();

-    let mut run_loop = neon::RunLoop::new(conf.clone());
-    let blocks_processed = run_loop.get_blocks_processed_arc();
-    let channel = run_loop.get_coordinator_channel().unwrap();
+        thread::spawn(move || run_loop.start(Some(burnchain_config), 0));

-    thread::spawn(move || run_loop.start(Some(burnchain_config), 0));
+        // give the run loop some time to start up!
+        wait_for_runloop(&blocks_processed);

-    // give the run loop some time to start up!
-    wait_for_runloop(&blocks_processed);
+        // first block wakes up the run loop
+        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

-    // first block wakes up the run loop
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+        // first block will hold our VRF registration
+        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

-    // first block will hold our VRF registration
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+        // second block will be the first mined Stacks block
+        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

-    // second block will be the first mined Stacks block
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+        // Let's setup the follower now.
+        follower_node_tx
+            .send(Signal::BootstrapNodeReady)
+            .expect("Unable to send signal");

-    // let's query the miner's account nonce:
-    let res = get_account(&http_origin, &miner_account);
-    assert_eq!(res.balance, 0);
-    assert_eq!(res.nonce, 1);
+        match bootstrap_node_rx.recv() {
+            Ok(Signal::ReplicatingAttachmentsStartTest1) => {
+                println!("Follower node is ready...");
+            }
+            _ => panic!("Bootstrap node could not boot. 
Aborting test."), + }; - // and our spender: - let res = get_account(&http_origin, &spender_princ); - assert_eq!(res.balance, spender_bal as u128); - assert_eq!(res.nonce, 0); + // Let's publish a (1) namespace-preorder, (2) namespace-reveal and (3) name-import in this mempool - let transactions = vec![ - make_contract_publish( - &spender_sk, + // (define-public (namespace-preorder (hashed-salted-namespace (buff 20)) + // (stx-to-burn uint)) + let namespace = "passport"; + let salt = "some-salt"; + let salted_namespace = format!("{namespace}{salt}"); + let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); + let tx_1 = make_contract_call( + &user_1, 0, - 1000, - conf.burnchain.chain_id, - "cost-definer", - cost_definer_src, - ), - make_contract_publish( - &spender_sk, + 260, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "namespace-preorder", + &[ + Value::buff_from(hashed_namespace.to_bytes().to_vec()).unwrap(), + Value::UInt(1000000000), + ], + ); + + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_1.clone()) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx_1[..]) + .unwrap() + .txid() + .to_string() + ); + } else { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + + // (define-public (namespace-reveal (namespace (buff 20)) + // (namespace-salt (buff 20)) + // (p-func-base uint) + // (p-func-coeff uint) + // (p-func-b1 uint) + // (p-func-b2 uint) + // (p-func-b3 uint) + // (p-func-b4 uint) + // (p-func-b5 uint) + // (p-func-b6 uint) + // (p-func-b7 uint) + // (p-func-b8 uint) + // (p-func-b9 uint) + // (p-func-b10 uint) + // (p-func-b11 uint) + // (p-func-b12 uint) + // (p-func-b13 uint) + // (p-func-b14 uint) + // (p-func-b15 uint) + // (p-func-b16 uint) + // (p-func-non-alpha-discount uint) + // (p-func-no-vowel-discount uint) + // (lifetime uint) + // (namespace-import principal)) + let tx_2 = make_contract_call( + &user_1, 1, 1000, - conf.burnchain.chain_id, - "caller", - caller_src, - ), - make_contract_publish( - &spender_sk, - 2, - 1000, - conf.burnchain.chain_id, - "voter", - power_vote_src, - ), - ]; - - for tx in transactions.into_iter() { - submit_tx(&http_origin, &tx); - } + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "namespace-reveal", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(salt.as_bytes().to_vec()).unwrap(), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1000), + Value::Principal(initial_balance_user_1.address.clone()), + ], + ); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_2.clone()) + .send() + .unwrap(); + eprintln!("{res:#?}"); + 
if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx_2[..]) + .unwrap() + .txid() + .to_string() + ); + } else { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } - let vote_tx = make_contract_call( - &spender_sk, - 3, - 1000, - conf.burnchain.chain_id, - &spender_addr, - "voter", - "propose-vote-confirm", - &[], - ); + // (define-public (name-import (namespace (buff 20)) + // (name (buff 48)) + // (zonefile-hash (buff 20))) + let zonefile_hex = "facade00"; + let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); + let tx_3 = make_contract_call( + &user_1, + 2, + 500, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-import", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from("johndoe".as_bytes().to_vec()).unwrap(), + Value::Principal(to_addr(&user_1).into()), + Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), + ], + ); - let call_le_tx = make_contract_call( - &spender_sk, - 4, - 1000, - conf.burnchain.chain_id, - &spender_addr, - "caller", - "execute-2", - &[Value::UInt(1)], - ); - - submit_tx(&http_origin, &vote_tx); - submit_tx(&http_origin, &call_le_tx); + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx_3), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/json") + .body(body) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } - // clear and mine another burnchain block, so that the new winner is seen by the observer - // (the observer is logically "one block behind" the miner - test_observer::clear(); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // From there, let's mine these transaction, and build more blocks. 
+        let mut sort_height = channel.get_sortitions_processed();
+        let few_blocks = sort_height + 10;

-    let mut blocks = test_observer::get_blocks();
-    // should have produced 1 new block
-    assert_eq!(blocks.len(), 1);
-    let block = blocks.pop().unwrap();
-    let transactions = block.get("transactions").unwrap().as_array().unwrap();
-    eprintln!("{}", transactions.len());
-    let mut tested = false;
-    let mut exec_cost = ExecutionCost::ZERO;
-    for tx in transactions.iter() {
-        let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap();
-        if raw_tx == "0x00" {
-            continue;
-        }
-        let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
-        let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
-        if let TransactionPayload::ContractCall(contract_call) = parsed.payload {
-            eprintln!("{}", contract_call.function_name.as_str());
-            if contract_call.function_name.as_str() == "execute-2" {
-                exec_cost =
-                    serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap();
-            } else if contract_call.function_name.as_str() == "propose-vote-confirm" {
-                let raw_result = tx.get("raw_result").unwrap().as_str().unwrap();
-                let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap();
-                assert_eq!(parsed.to_string(), "(ok u0)");
-                tested = true;
-            }
+        while sort_height < few_blocks {
+            next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+            sort_height = channel.get_sortitions_processed();
+            eprintln!("Sort height: {sort_height}");
        }
-    }
-    assert!(tested, "Should have found a contract call tx");
-
-    // try to confirm the passed vote (this will fail)
-    let confirm_proposal = make_contract_call(
-        &spender_sk,
-        5,
-        1000,
-        conf.burnchain.chain_id,
-        &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
-        "cost-voting",
-        "confirm-miners",
-        &[Value::UInt(0)],
-    );
-    submit_tx(&http_origin, &confirm_proposal);
+        // Then check that the follower is correctly replicating the attachment
+        follower_node_tx
+            .send(Signal::ReplicatingAttachmentsCheckTest1(sort_height))
+            .expect("Unable to send signal");

-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+        match bootstrap_node_rx.recv() {
+            Ok(Signal::ReplicatingAttachmentsStartTest2) => {
+                println!("Follower node is ready...");
+            }
+            _ => panic!("Bootstrap node could not boot. Aborting test."),
+        };

-    // clear and mine another burnchain block, so that the new winner is seen by the observer
-    // (the observer is logically "one block behind" the miner
-    test_observer::clear();
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
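
Both attachment-polling loops in this test (the bootstrap thread's, just below, and the follower's later on) derive the lookup key the same way: Atlas addresses an attachment by the Hash160 of its raw content, which is the same hash the name-import call committed to as its zonefile-hash. A standalone, illustrative sketch (the helper name is mine; Hash160 and hex_bytes are the same utilities this file already uses):

use stacks_common::util::hash::{hex_bytes, Hash160};

// Derive the GET path used to poll a zonefile attachment from its hex content.
fn attachment_path(http_origin: &str, zonefile_hex: &str) -> String {
    let content = hex_bytes(zonefile_hex).expect("valid hex");
    let content_hash = Hash160::from_data(&content);
    format!("{http_origin}/v2/attachments/{}", content_hash.to_hex())
}

The loops then simply GET that path until the node answers with the replicated content.

+        // From there, let's mine these transactions, and build more blocks.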
+ let mut sort_height = channel.get_sortitions_processed(); + let few_blocks = sort_height + 10; - let mut blocks = test_observer::get_blocks(); - // should have produced 1 new block - assert_eq!(blocks.len(), 1); - let block = blocks.pop().unwrap(); - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); - let mut tested = false; - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; + while sort_height < few_blocks { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::ContractCall(contract_call) = parsed.payload { - eprintln!("{}", contract_call.function_name.as_str()); - if contract_call.function_name.as_str() == "confirm-miners" { - let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); - assert_eq!(parsed.to_string(), "(err 13)"); - tested = true; + + // Poll GET v2/attachments/ + for i in 1..10 { + let mut attachments_did_sync = false; + let mut timeout = 60; + while !attachments_did_sync { + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); + let hashed_zonefile = Hash160::from_data(&zonefile_hex); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); + let res = client + .get(&path) + .header("Content-Type", "application/json") + .send() + .unwrap(); + eprintln!("{res:#?}"); + if res.status().is_success() { + let attachment_response: GetAttachmentResponse = res.json().unwrap(); + assert_eq!(attachment_response.attachment.content, zonefile_hex); + attachments_did_sync = true; + } else { + timeout -= 1; + if timeout == 0 { + panic!("Failed syncing 9 attachments between 2 neon runloops within 60s (failed at {}) - Something is wrong", &to_hex(&zonefile_hex)); + } + eprintln!("Attachment {} not sync'd yet", bytes_to_hex(&zonefile_hex)); + thread::sleep(Duration::from_millis(1000)); + } } } - } - assert!(tested, "Should have found a contract call tx"); - for _i in 0..58 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - } - - // confirm the passed vote - let confirm_proposal = make_contract_call( - &spender_sk, - 6, - 1000, - conf.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "cost-voting", - "confirm-miners", - &[Value::UInt(0)], - ); + // Then check that the follower is correctly replicating the attachment + follower_node_tx + .send(Signal::ReplicatingAttachmentsCheckTest2(sort_height)) + .expect("Unable to send signal"); - submit_tx(&http_origin, &confirm_proposal); + channel.stop_chains_coordinator(); + }); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // clear and mine another burnchain block, so that the new winner is seen by the observer - // (the observer is logically "one block behind" the miner - test_observer::clear(); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // Start the attached observer + test_observer::spawn(); - let mut blocks = test_observer::get_blocks(); - // should have produced 1 new block - assert_eq!(blocks.len(), 1); - let block = blocks.pop().unwrap(); - let transactions = 
block.get("transactions").unwrap().as_array().unwrap();
-    eprintln!("{}", transactions.len());
-    let mut tested = false;
-    for tx in transactions.iter() {
-        let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap();
-        if raw_tx == "0x00" {
-            continue;
-        }
-        let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
-        let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
-        if let TransactionPayload::ContractCall(contract_call) = parsed.payload {
-            eprintln!("{}", contract_call.function_name.as_str());
-            if contract_call.function_name.as_str() == "confirm-miners" {
-                let raw_result = tx.get("raw_result").unwrap().as_str().unwrap();
-                let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap();
-                assert_eq!(parsed.to_string(), "(ok true)");
-                tested = true;
-            }
+    // The bootstrap node mined a few blocks and is ready, let's setup this node.
+    match follower_node_rx.recv() {
+        Ok(Signal::BootstrapNodeReady) => {
+            println!("Booting follower node...");
        }
-    }
-    assert!(tested, "Should have found a contract call tx");
+        _ => panic!("Bootstrap node could not boot. Aborting test."),
+    };

-    let call_le_tx = make_contract_call(
-        &spender_sk,
-        7,
-        1000,
-        conf.burnchain.chain_id,
-        &spender_addr,
-        "caller",
-        "execute-2",
-        &[Value::UInt(1)],
-    );
+    let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path());
+    let chain_id = conf_follower_node.burnchain.chain_id;
+    let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind);

-    submit_tx(&http_origin, &call_le_tx);
+    eprintln!("Chain bootstrapped...");

-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    // clear and mine another burnchain block, so that the new winner is seen by the observer
-    // (the observer is logically "one block behind" the miner
-    test_observer::clear();
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+    let mut run_loop = neon::RunLoop::new(conf_follower_node.clone());
+    let blocks_processed = run_loop.get_blocks_processed_arc();
+    let client = reqwest::blocking::Client::new();
+    let channel = run_loop.get_coordinator_channel().unwrap();

-    let mut blocks = test_observer::get_blocks();
-    // should have produced 1 new block
-    assert_eq!(blocks.len(), 1);
-    let block = blocks.pop().unwrap();
-    let transactions = block.get("transactions").unwrap().as_array().unwrap();
+    thread::spawn(move || run_loop.start(Some(burnchain_config), 0));

-    let mut tested = false;
-    let mut new_exec_cost = ExecutionCost::max_value();
-    for tx in transactions.iter() {
-        let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap();
-        if raw_tx == "0x00" {
-            continue;
-        }
-        let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
-        let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
-        if let TransactionPayload::ContractCall(contract_call) = parsed.payload {
-            eprintln!("{}", contract_call.function_name.as_str());
-            if contract_call.function_name.as_str() == "execute-2" {
-                new_exec_cost =
-                    serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap();
-                tested = true;
+    // give the run loop some time to start up!
+    wait_for_runloop(&blocks_processed);
+
+    // Follower node is ready, the bootstrap node will now handover
+    bootstrap_node_tx
+        .send(Signal::ReplicatingAttachmentsStartTest1)
+        .expect("Unable to send signal");
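
For orientation, the bootstrap and follower halves of this test coordinate through a plain mpsc ping-pong, nothing more elaborate. A condensed, illustrative sketch of the sequencing only (the height 210 is a made-up stand-in; the real test sends whatever sort height it actually mined to):

use std::sync::mpsc;
use std::thread;

#[derive(Debug)]
enum Signal {
    BootstrapNodeReady,
    ReplicatingAttachmentsStartTest1,
    ReplicatingAttachmentsCheckTest1(u64),
}

fn main() {
    let (follower_tx, follower_rx) = mpsc::channel(); // bootstrap -> follower
    let (bootstrap_tx, bootstrap_rx) = mpsc::channel(); // follower -> bootstrap

    let bootstrap = thread::spawn(move || {
        // 1. Mine the first blocks, then tell the follower to boot.
        follower_tx.send(Signal::BootstrapNodeReady).unwrap();
        // 3. Wait until the follower is up...
        match bootstrap_rx.recv().unwrap() {
            Signal::ReplicatingAttachmentsStartTest1 => {}
            other => panic!("unexpected signal: {other:?}"),
        }
        // 4. ...mine the attachment-bearing txs and report a target height (hypothetical here).
        follower_tx
            .send(Signal::ReplicatingAttachmentsCheckTest1(210))
            .unwrap();
    });

    // 2. Boot the follower once the bootstrap node says it is ready.
    match follower_rx.recv().unwrap() {
        Signal::BootstrapNodeReady => println!("Booting follower node..."),
        other => panic!("unexpected signal: {other:?}"),
    }
    bootstrap_tx
        .send(Signal::ReplicatingAttachmentsStartTest1)
        .unwrap();
    // 5. Poll attachments until caught up to the reported height.
    if let Signal::ReplicatingAttachmentsCheckTest1(h) = follower_rx.recv().unwrap() {
        println!("check attachments at sort height {h}");
    }
    bootstrap.join().unwrap();
}

+
+    // The bootstrap node published and mined a transaction that includes an attachment.
+    // Let's observe the attachments replication kicking in.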
+ let target_height = match follower_node_rx.recv() { + Ok(Signal::ReplicatingAttachmentsCheckTest1(target_height)) => target_height, + _ => panic!("Bootstrap node could nod boot. Aborting test."), + }; + + let mut sort_height = channel.get_sortitions_processed(); + while sort_height < target_height { + wait_for_runloop(&blocks_processed); + sort_height = channel.get_sortitions_processed(); + } + + // Now wait for the node to sync the attachment + let mut attachments_did_sync = false; + let mut timeout = 60; + while !attachments_did_sync { + let zonefile_hex = "facade00"; + let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); + let res = client + .get(&path) + .header("Content-Type", "application/json") + .send() + .unwrap(); + eprintln!("{res:#?}"); + if res.status().is_success() { + eprintln!("Success syncing attachment - {}", res.text().unwrap()); + attachments_did_sync = true; + } else { + timeout -= 1; + if timeout == 0 { + panic!("Failed syncing 1 attachments between 2 neon runloops within 60s - Something is wrong"); } + eprintln!("Attachment {zonefile_hex} not sync'd yet"); + thread::sleep(Duration::from_millis(1000)); } } - assert!(tested, "Should have found a contract call tx"); - assert!(exec_cost.exceeds(&new_exec_cost)); + // Test 2: 9 transactions are posted to the follower. + // We want to make sure that the miner is able to + // 1) mine these transactions + // 2) retrieve the attachments staged on the follower node. + // 3) ensure that the follower is also instantiating the attachments after + // executing the transactions, once mined. + let namespace = "passport"; + for i in 1..10 { + let user = StacksPrivateKey::random(); + let zonefile_hex = format!("facade0{i}"); + let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); + let name = format!("johndoe{i}"); + let tx = make_contract_call( + &user_1, + 2 + i, + 500, + chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-import", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(name.as_bytes().to_vec()).unwrap(), + Value::Principal(to_addr(&user).into()), + Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), + ], + ); + + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; + + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/json") + .body(body) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + } + + bootstrap_node_tx + .send(Signal::ReplicatingAttachmentsStartTest2) + .expect("Unable to send signal"); + + let target_height = match follower_node_rx.recv() { + Ok(Signal::ReplicatingAttachmentsCheckTest2(target_height)) => target_height, + _ => panic!("Bootstrap node could not boot. 
Aborting test."),
+    };
+
+    let mut sort_height = channel.get_sortitions_processed();
+    while sort_height < target_height {
+        wait_for_runloop(&blocks_processed);
+        sort_height = channel.get_sortitions_processed();
+    }
+
+    // Poll GET v2/attachments/
+    for i in 1..10 {
+        let mut attachments_did_sync = false;
+        let mut timeout = 60;
+        while !attachments_did_sync {
+            let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap();
+            let hashed_zonefile = Hash160::from_data(&zonefile_hex);
+            let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex());
+            let res = client
+                .get(&path)
+                .header("Content-Type", "application/json")
+                .send()
+                .unwrap();
+            eprintln!("{res:#?}");
+            if res.status().is_success() {
+                let attachment_response: GetAttachmentResponse = res.json().unwrap();
+                assert_eq!(attachment_response.attachment.content, zonefile_hex);
+                attachments_did_sync = true;
+            } else {
+                timeout -= 1;
+                if timeout == 0 {
+                    panic!("Failed syncing 9 attachments between 2 neon runloops within 60s (failed at {}) - Something is wrong", &to_hex(&zonefile_hex));
+                }
+                eprintln!("Attachment {} not sync'd yet", bytes_to_hex(&zonefile_hex));
+                thread::sleep(Duration::from_millis(1000));
+            }
+        }
+    }
+    // Ensure that the attached sidecar was able to receive a total of 10 attachments
+    // This last assertion is flaky for some reason; it is not worth bullying the CI or disabling this whole test over it
+    // We're using an inequality as a best effort, to make sure that **some** attachments were received.
+    assert!(!test_observer::get_attachments().is_empty());
    test_observer::clear();
    channel.stop_chains_coordinator();
+
+    bootstrap_node_thread.join().unwrap();
}

#[test]
#[ignore]
-fn mining_events_integration_test() {
+fn antientropy_integration_test() {
    if env::var("BITCOIND_TEST") != Ok("1".into()) {
        return;
    }

-    let small_contract = "(define-public (f) (ok 1))".to_string();
+    let user_1 = StacksPrivateKey::random();
+    let initial_balance_user_1 = InitialBalance {
+        address: to_addr(&user_1).into(),
+        amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS),
+    };

-    let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap();
-    let addr = to_addr(&spender_sk);
+    // Prepare the config of the bootstrap node
+    let (mut conf_bootstrap_node, _) = neon_integration_test_conf();
+    let bootstrap_node_public_key = {
+        let keychain = Keychain::default(conf_bootstrap_node.node.seed.clone());
+        let mut pk = keychain.generate_op_signer().get_public_key();
+        pk.set_compressed(true);
+        pk.to_hex()
+    };
+    conf_bootstrap_node
+        .initial_balances
+        .push(initial_balance_user_1.clone());
+    conf_bootstrap_node.connection_options.antientropy_retry = 10; // move this along -- do anti-entropy protocol once every 10 seconds
+    conf_bootstrap_node.connection_options.antientropy_public = true; // always push blocks, even if we're not NAT'ed
+    conf_bootstrap_node.connection_options.max_block_push = 1000;
+    conf_bootstrap_node.connection_options.max_microblock_push = 1000;

-    let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap();
-    let addr_2 = to_addr(&spender_sk_2);
+    conf_bootstrap_node.node.mine_microblocks = true;
+    conf_bootstrap_node.miner.microblock_attempt_time_ms = 2_000;
+    conf_bootstrap_node.node.wait_time_for_microblocks = 0;
+    conf_bootstrap_node.node.microblock_frequency = 0;
+    conf_bootstrap_node.miner.first_attempt_time_ms = 1_000_000;
+    conf_bootstrap_node.miner.subsequent_attempt_time_ms = 1_000_000;
+    conf_bootstrap_node.burnchain.max_rbf = 1000000;
+ 
conf_bootstrap_node.node.wait_time_for_blocks = 1_000; - let (mut conf, _) = neon_integration_test_conf(); + conf_bootstrap_node.node.always_use_affirmation_maps = false; - conf.initial_balances.push(InitialBalance { - address: addr.into(), - amount: 10000000, - }); - conf.initial_balances.push(InitialBalance { - address: addr_2.into(), - amount: 10000000, - }); + // Prepare the config of the follower node + let (mut conf_follower_node, _) = neon_integration_test_conf(); + let bootstrap_node_url = format!( + "{bootstrap_node_public_key}@{}", + conf_bootstrap_node.node.p2p_bind + ); + conf_follower_node.connection_options.disable_block_download = true; + conf_follower_node.node.set_bootstrap_nodes( + bootstrap_node_url, + conf_follower_node.burnchain.chain_id, + conf_follower_node.burnchain.peer_version, + ); + conf_follower_node.node.miner = false; + conf_follower_node + .initial_balances + .push(initial_balance_user_1); + conf_follower_node + .events_observers + .insert(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, + disable_retries: false, + }); - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 1000; - conf.node.microblock_frequency = 1000; + conf_follower_node.node.mine_microblocks = true; + conf_follower_node.miner.microblock_attempt_time_ms = 2_000; + conf_follower_node.node.wait_time_for_microblocks = 0; + conf_follower_node.node.microblock_frequency = 0; + conf_follower_node.miner.first_attempt_time_ms = 1_000_000; + conf_follower_node.miner.subsequent_attempt_time_ms = 1_000_000; + conf_follower_node.burnchain.max_rbf = 1000000; + conf_follower_node.node.wait_time_for_blocks = 1_000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf_follower_node.node.always_use_affirmation_maps = false; - let tx = make_contract_publish( - &spender_sk, - 0, - 600000, - conf.burnchain.chain_id, - "small", - &small_contract, - ); - let tx_2 = make_contract_publish( - &spender_sk, - 1, - 610000, - conf.burnchain.chain_id, - "small", - &small_contract, - ); - let mb_tx = make_contract_publish_microblock_only( - &spender_sk_2, - 0, - 620000, - conf.burnchain.chain_id, - "small", - &small_contract, - ); + // Our 2 nodes will share the bitcoind node + let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); - test_observer::spawn(); - test_observer::register( - &mut conf, - &[ - EventKeyType::AnyEvent, - EventKeyType::MinedBlocks, - EventKeyType::MinedMicroblocks, - ], - ); + let (bootstrap_node_tx, bootstrap_node_rx) = mpsc::channel(); + let (follower_node_tx, follower_node_rx) = mpsc::channel(); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); + let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); + let target_height = 3 + (3 * burnchain_config.pox_constants.reward_cycle_length); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + let bootstrap_node_thread = thread::spawn(move || { + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf_bootstrap_node.clone(), + None, + Some(burnchain_config.clone()), + None, + ); - 
btc_regtest_controller.bootstrap_chain(201); + btc_regtest_controller.bootstrap_chain(201); - eprintln!("Chain bootstrapped..."); + eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); + let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); - let channel = run_loop.get_coordinator_channel().unwrap(); + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - thread::spawn(move || run_loop.start(None, 0)); + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + for i in 0..(target_height - 3) { + eprintln!("Mine block {i}"); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); + } - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // Let's setup the follower now. + follower_node_tx + .send(Signal::BootstrapNodeReady) + .expect("Unable to send signal"); - submit_tx(&http_origin, &tx); // should succeed - submit_tx(&http_origin, &tx_2); // should fail since it tries to publish contract with same name - submit_tx(&http_origin, &mb_tx); // should be in microblock bc it is microblock only + eprintln!("Bootstrap node informed follower that it's ready; waiting for acknowledgement"); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // wait for bootstrap node to terminate + match bootstrap_node_rx.recv() { + Ok(Signal::FollowerNodeReady) => { + println!("Follower has finished"); + } + Ok(x) => { + panic!("Follower gave a bad signal: {x:?}"); + } + Err(e) => { + panic!("Failed to recv: {e:?}"); + } + }; - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + channel.stop_chains_coordinator(); + }); - // check that the nonces have gone up - let res = get_account(&http_origin, &addr); - assert_eq!(res.nonce, 1); + // Start the attached observer + test_observer::spawn(); - let res = get_account(&http_origin, &addr_2); - assert_eq!(res.nonce, 1); + // The bootstrap node mined a few blocks and is ready, let's setup this node. + match follower_node_rx.recv() { + Ok(Signal::BootstrapNodeReady) => { + println!("Booting follower node..."); + } + _ => panic!("Bootstrap node could not boot. 
Aborting test."), + }; - // check mined microblock events - let mined_microblock_events = test_observer::get_mined_microblocks(); - assert!(!mined_microblock_events.is_empty()); + let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path()); + let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind); - // check tx events in the first microblock - // 1 success: 1 contract publish, 2 error (on chain transactions) - let microblock_tx_events = &mined_microblock_events[0].tx_events; - assert_eq!(microblock_tx_events.len(), 1); + eprintln!("Chain bootstrapped..."); - // contract publish - match µblock_tx_events[0] { - TransactionEvent::Success(TransactionSuccessEvent { - result, - fee, - execution_cost, - .. - }) => { - assert!(result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap()); - assert_eq!(fee, &620000); - assert_eq!( - execution_cost, - &ExecutionCost { - write_length: 35, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 311000 - } - ) - } - _ => panic!("unexpected event type"), - } + let mut run_loop = neon::RunLoop::new(conf_follower_node.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); - // check mined block events - let mined_block_events = test_observer::get_mined_blocks(); - assert!(mined_block_events.len() >= 3); + let thread_burnchain_config = burnchain_config.clone(); + thread::spawn(move || run_loop.start(Some(thread_burnchain_config), 0)); - // check the tx events in the third mined block - // 2 success: 1 coinbase tx event + 1 contract publish, 1 error (duplicate contract) - let third_block_tx_events = &mined_block_events[2].tx_events; - assert_eq!(third_block_tx_events.len(), 3); + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); - // coinbase event - match &third_block_tx_events[0] { - TransactionEvent::Success(TransactionSuccessEvent { txid, result, .. }) => { - assert_eq!( - txid.to_string(), - "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6" - ); - assert!(result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap()); - } - _ => panic!("unexpected event type"), + let mut sort_height = channel.get_sortitions_processed(); + while sort_height < (target_height + 200) as u64 { + eprintln!( + "Follower sortition is {sort_height}, target is {}", + target_height + 200 + ); + wait_for_runloop(&blocks_processed); + sort_height = channel.get_sortitions_processed(); + sleep_ms(1000); } - // contract publish event - match &third_block_tx_events[1] { - TransactionEvent::Success(TransactionSuccessEvent { - result, - fee, - execution_cost, - .. 
- }) => { - assert!(result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap()); - assert_eq!(fee, &600000); - assert_eq!( - execution_cost, - &ExecutionCost { - write_length: 35, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 311000 - } - ) - } - _ => panic!("unexpected event type"), - } + eprintln!("Follower booted up; waiting for blocks"); - // dupe contract error event - match &third_block_tx_events[2] { - TransactionEvent::ProcessingError(TransactionErrorEvent { txid: _, error }) => { - assert_eq!( - error, - "Duplicate contract 'ST3WM51TCWMJYGZS1QFMC28DH5YP86782YGR113C1.small'" - ); + // wait for block height to reach target + let mut tip_height = get_chain_tip_height(&http_origin); + eprintln!( + "Follower Stacks tip height is {tip_height}, wait until {tip_height} >= {target_height} - 3" + ); + + let btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf_follower_node.clone(), + None, + Some(burnchain_config), + None, + ); + + let mut burnchain_deadline = get_epoch_time_secs() + 60; + while tip_height < (target_height - 3) as u64 { + sleep_ms(1000); + tip_height = get_chain_tip_height(&http_origin); + + eprintln!("Follower Stacks tip height is {tip_height}"); + + if burnchain_deadline < get_epoch_time_secs() { + burnchain_deadline = get_epoch_time_secs() + 60; + btc_regtest_controller.build_next_block(1); } - _ => panic!("unexpected event type"), } + bootstrap_node_tx + .send(Signal::FollowerNodeReady) + .expect("Unable to send signal"); + bootstrap_node_thread.join().unwrap(); + + eprintln!("Follower node finished"); + test_observer::clear(); channel.stop_chains_coordinator(); } -/// This test checks that the limit behavior in the miner works as expected for anchored block -/// building. When we first hit the block limit, the limit behavior switches to -/// `CONTRACT_LIMIT_HIT`, during which stx transfers are still allowed, and contract related -/// transactions are skipped. -/// Note: the test is sensitive to the order in which transactions are mined; it is written -/// expecting that transactions are traversed in the order tx_1, tx_2, tx_3, and tx_4. -#[test] -#[ignore] -fn block_limit_hit_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - // 700 invocations - let max_contract_src = format!( - "(define-private (work) (begin {} 1)) - (define-private (times-100) (begin {} 1)) - (define-private (times-200) (begin (times-100) (times-100) 1)) - (define-private (times-500) (begin (times-200) (times-200) (times-100) 1)) - (times-500) (times-200)", - (0..10) - .map(|_| format!( - "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)",
- boot_code_id("cost-voting", false),
- boot_code_id("costs", false),
- boot_code_id("costs", false),
- ))
- .collect::<Vec<String>>()
- .join(" "),
- (0..10)
- .map(|_| "(work)".to_string())
- .collect::<Vec<String>>()
- .join(" "),
- );
+#[allow(clippy::too_many_arguments)]
+fn wait_for_mined(
+ btc_regtest_controller: &mut BitcoinRegtestController,
+ blocks_processed: &Arc<AtomicU64>,
+ http_origin: &str,
+ users: &[StacksPrivateKey],
+ account_before_nonces: &[u64],
+ batch_size: usize,
+ batches: usize,
+ index_block_hashes: &mut Vec<StacksBlockId>,
+) {
+ let mut all_mined_vec = vec![false; batches * batch_size];
+ let mut account_after_nonces = vec![0; batches * batch_size];
+ let mut all_mined = false;
+ for _k in 0..10 {
+ next_block_and_wait(btc_regtest_controller, blocks_processed);
+ sleep_ms(10_000);

- // 2900 invocations
- let oversize_contract_src = format!(
- "(define-private (work) (begin {} 1))
- (define-private (times-100) (begin {} 1))
- (define-private (times-200) (begin (times-100) (times-100) 1))
- (define-private (times-500) (begin (times-200) (times-200) (times-100) 1))
- (define-private (times-1000) (begin (times-500) (times-500) 1))
- (times-1000) (times-1000) (times-500) (times-200) (times-200)",
- (0..10)
- .map(|_| format!(
- "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)",
- boot_code_id("cost-voting", false),
- boot_code_id("costs", false),
- boot_code_id("costs", false),
- ))
- .collect::<Vec<String>>()
- .join(" "),
- (0..10)
- .map(|_| "(work)".to_string())
- .collect::<Vec<String>>()
- .join(" "),
- );
+ let (ch, bhh) = get_chain_tip(http_origin);
+ let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh);

- let spender_sk = StacksPrivateKey::random();
- let addr = to_addr(&spender_sk);
- let second_spender_sk = StacksPrivateKey::random();
- let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into();
- let third_spender_sk = StacksPrivateKey::random();
- let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into();
+ if let Some(last_ibh) = index_block_hashes.last() {
+ if *last_ibh != ibh {
+ index_block_hashes.push(ibh);
+ eprintln!("Tip is now {ibh}");
+ }
+ }

- let (mut conf, _miner_account) = neon_integration_test_conf();
+ for j in 0..batches * batch_size {
+ let account_after = get_account(http_origin, &to_addr(&users[j]));
+ let account_after_nonce = account_after.nonce;
+ account_after_nonces[j] = account_after_nonce;

- conf.initial_balances.push(InitialBalance {
- address: addr.into(),
- amount: 10_000_000,
- });
- conf.initial_balances.push(InitialBalance {
- address: second_spender_addr.clone(),
- amount: 10_000_000,
- });
- conf.initial_balances.push(InitialBalance {
- address: third_spender_addr.clone(),
- amount: 10_000_000,
- });
+ if account_before_nonces[j] < account_after_nonce {
+ all_mined_vec[j] = true;
+ }
+ }

- conf.node.mine_microblocks = true;
- conf.node.wait_time_for_microblocks = 30000;
- conf.node.microblock_frequency = 1000;
+ all_mined = all_mined_vec.iter().all(|elem| *elem);
+ if all_mined {
+ break;
+ }
+ }
+ if !all_mined {
+ panic!(
+ "Failed to mine all transactions: nonces = {account_after_nonces:?}, expected {account_before_nonces:?} + {batch_size}"
+ );
+ }
+}

- conf.miner.first_attempt_time_ms = i64::MAX as u64;
- conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
+#[test]
+#[ignore]
+fn atlas_stress_integration_test() {
+ if env::var("BITCOIND_TEST") != Ok("1".into()) {
+ return;
+ }

- // included in first block
- let tx = make_contract_publish(
&spender_sk, - 0, - 555_000, - conf.burnchain.chain_id, - "over", - &oversize_contract_src, - ); - // contract limit hit; included in second block - let tx_2 = make_contract_publish( - &spender_sk, - 1, - 555_000, - conf.burnchain.chain_id, - "over-2", - &oversize_contract_src, - ); - // skipped over since contract limit was hit; included in second block - let tx_3 = make_contract_publish( - &second_spender_sk, - 0, - 150_000, - conf.burnchain.chain_id, - "max", - &max_contract_src, - ); - // included in first block - let tx_4 = make_stacks_transfer( - &third_spender_sk, - 0, - 180, - conf.burnchain.chain_id, - &PrincipalData::from(addr), - 100, - ); + let mut initial_balances = vec![]; + let mut users = vec![]; + + let batches = 5; + let batch_size = 20; + + for _i in 0..(2 * batches * batch_size + 1) { + let user = StacksPrivateKey::random(); + let initial_balance_user = InitialBalance { + address: to_addr(&user).into(), + amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), + }; + users.push(user); + initial_balances.push(initial_balance_user); + } + + // Prepare the config of the bootstrap node + let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); + conf_bootstrap_node + .initial_balances + .append(&mut initial_balances.clone()); + + conf_bootstrap_node.miner.first_attempt_time_ms = u64::MAX; + conf_bootstrap_node.miner.subsequent_attempt_time_ms = u64::MAX; + + conf_bootstrap_node.node.mine_microblocks = true; + conf_bootstrap_node.miner.microblock_attempt_time_ms = 2_000; + conf_bootstrap_node.node.wait_time_for_microblocks = 0; + conf_bootstrap_node.node.microblock_frequency = 0; + conf_bootstrap_node.miner.first_attempt_time_ms = 1_000_000; + conf_bootstrap_node.miner.subsequent_attempt_time_ms = 2_000_000; + conf_bootstrap_node.burnchain.max_rbf = 1000000; + conf_bootstrap_node.node.wait_time_for_blocks = 1_000; + + conf_bootstrap_node.node.always_use_affirmation_maps = false; + let user_1 = users.pop().unwrap(); + let initial_balance_user_1 = initial_balances.pop().unwrap(); + + // Start the attached observer test_observer::spawn(); - test_observer::register_any(&mut conf); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf_bootstrap_node.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf_bootstrap_node.node.rpc_bind); btc_regtest_controller.bootstrap_chain(201); eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf); + let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); + let client = reqwest::blocking::Client::new(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); // give the run loop some time to start up! 
wait_for_runloop(&blocks_processed); @@ -6322,3325 +6187,864 @@ fn block_limit_hit_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // submit all the transactions - let txid_1 = submit_tx(&http_origin, &tx); - let txid_2 = submit_tx(&http_origin, &tx_2); - let txid_3 = submit_tx(&http_origin, &tx_3); - let txid_4 = submit_tx(&http_origin, &tx_4); - - sleep_ms(5_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(20_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(20_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(20_000); - - let res = get_account(&http_origin, &addr); - assert_eq!(res.nonce, 2); - - let res = get_account(&http_origin, &second_spender_addr); - assert_eq!(res.nonce, 1); + let mut index_block_hashes = vec![]; - let res = get_account(&http_origin, &third_spender_addr); - assert_eq!(res.nonce, 1); + // Let's publish a (1) namespace-preorder, (2) namespace-reveal and (3) name-import in this mempool - let mined_block_events = test_observer::get_blocks(); - assert_eq!(mined_block_events.len(), 5); + // (define-public (namespace-preorder (hashed-salted-namespace (buff 20)) + // (stx-to-burn uint)) + let namespace = "passport"; + let salt = "some-salt"; + let salted_namespace = format!("{namespace}{salt}"); + let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); + let tx_1 = make_contract_call( + &user_1, + 0, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "namespace-preorder", + &[ + Value::buff_from(hashed_namespace.to_bytes().to_vec()).unwrap(), + Value::UInt(1000000000), + ], + ); - let tx_third_block = mined_block_events[3] - .get("transactions") - .unwrap() - .as_array() - .unwrap(); - assert_eq!(tx_third_block.len(), 3); - let txid_1_exp = tx_third_block[1].get("txid").unwrap().as_str().unwrap(); - let txid_4_exp = tx_third_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{txid_1}"), txid_1_exp); - assert_eq!(format!("0x{txid_4}"), txid_4_exp); - - let tx_fourth_block = mined_block_events[4] - .get("transactions") - .unwrap() - .as_array() - .unwrap(); - assert_eq!(tx_fourth_block.len(), 3); - let txid_2_exp = tx_fourth_block[1].get("txid").unwrap().as_str().unwrap(); - let txid_3_exp = tx_fourth_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{txid_2}"), txid_2_exp); - assert_eq!(format!("0x{txid_3}"), txid_3_exp); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -/// This test checks that the limit behavior in the miner works as expected during microblock -/// building. When we first hit the block limit, the limit behavior switches to -/// `CONTRACT_LIMIT_HIT`, during which stx transfers are still allowed, and contract related -/// transactions are skipped. -/// Note: the test is sensitive to the order in which transactions are mined; it is written -/// expecting that transactions are traversed in the order tx_1, tx_2, tx_3, and tx_4. 
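Both removed limit tests (the anchored-block one above and the microblock one below) assert the same miner rule: the first contract transaction to bust the block budget flips the miner into a contract-limit-hit mode, after which contract publishes and calls are skipped while cheap STX transfers are still admitted. As a reading aid, here is a minimal, self-contained sketch of that selection rule; the names are simplified stand-ins, not the node's real types:

```rust
#[derive(PartialEq)]
enum LimitBehavior {
    NoLimitHit,
    ContractLimitHit,
}

enum Tx {
    StxTransfer { cost: u64 },
    ContractPublish { cost: u64 },
}

/// Walk candidates in order with a per-block budget. The first contract tx
/// that overflows the budget flips the state; afterwards contract txs are
/// skipped, but transfers are still considered.
fn select_txs(candidates: &[Tx], budget: u64) -> Vec<usize> {
    let mut included = Vec::new();
    let mut spent = 0u64;
    let mut behavior = LimitBehavior::NoLimitHit;
    for (i, tx) in candidates.iter().enumerate() {
        let cost = match tx {
            Tx::StxTransfer { cost } => *cost,
            Tx::ContractPublish { cost } => {
                if behavior == LimitBehavior::ContractLimitHit {
                    continue; // contract-related txs are skipped once the limit was hit
                }
                *cost
            }
        };
        if spent + cost > budget {
            if matches!(tx, Tx::ContractPublish { .. }) {
                behavior = LimitBehavior::ContractLimitHit;
            }
            continue; // does not fit in this block; left for the next one
        }
        spent += cost;
        included.push(i);
    }
    included
}

fn main() {
    let txs = [
        Tx::ContractPublish { cost: 60 }, // tx_1: fits
        Tx::ContractPublish { cost: 60 }, // tx_2: busts the budget, flips the state
        Tx::ContractPublish { cost: 40 }, // tx_3: skipped outright
        Tx::StxTransfer { cost: 1 },      // tx_4: still admitted
    ];
    assert_eq!(select_txs(&txs, 100), vec![0, 3]);
}
```

With the tests' ordering (a large publish, two more publishes, then a transfer), one pass admits tx_1 and the transfer and defers the other publishes, which is exactly the two-block (or two-microblock) split both tests assert.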
-#[test]
-#[ignore]
-fn microblock_limit_hit_integration_test() {
- if env::var("BITCOIND_TEST") != Ok("1".into()) {
- return;
- }
-
- let max_contract_src = format!(
- "(define-private (work) (begin {} 1))
- (define-private (times-100) (begin {} 1))
- (define-private (times-200) (begin (times-100) (times-100) 1))
- (define-private (times-500) (begin (times-200) (times-200) (times-100) 1))
- (times-500) (times-200)",
- (0..3)
- .map(|_| format!(
- "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)",
- boot_code_id("cost-voting", false),
- boot_code_id("costs", false),
- boot_code_id("costs", false),
- ))
- .collect::<Vec<String>>()
- .join(" "),
- (0..3)
- .map(|_| "(work)".to_string())
- .collect::<Vec<String>>()
- .join(" "),
- );
-
- let oversize_contract_src = format!(
- "(define-private (work) (begin {} 1))
- (define-private (times-100) (begin {} 1))
- (define-private (times-200) (begin (times-100) (times-100) 1))
- (define-private (times-500) (begin (times-200) (times-200) (times-100) 1))
- (define-private (times-1000) (begin (times-500) (times-500) 1))
- (times-1000) (times-1000) (times-500) (times-200) (times-200)",
- (0..3)
- .map(|_| format!(
- "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)",
- boot_code_id("cost-voting", false),
- boot_code_id("costs", false),
- boot_code_id("costs", false),
- ))
- .collect::<Vec<String>>()
- .join(" "),
- (0..3)
- .map(|_| "(work)".to_string())
- .collect::<Vec<String>>()
- .join(" "),
- );
-
- let spender_sk = StacksPrivateKey::random();
- let addr = to_addr(&spender_sk);
- let second_spender_sk = StacksPrivateKey::random();
- let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into();
- let third_spender_sk = StacksPrivateKey::random();
- let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into();
-
- let (mut conf, _) = neon_integration_test_conf();
-
- conf.initial_balances.push(InitialBalance {
- address: addr.into(),
- amount: 10_000_000,
- });
- conf.initial_balances.push(InitialBalance {
- address: second_spender_addr.clone(),
- amount: 10_000_000,
- });
- conf.initial_balances.push(InitialBalance {
- address: third_spender_addr.clone(),
- amount: 10_000_000,
- });
-
- conf.node.mine_microblocks = true;
- // conf.node.wait_time_for_microblocks = 30000;
- conf.node.wait_time_for_microblocks = 1000;
- conf.node.microblock_frequency = 1000;
-
- conf.miner.microblock_attempt_time_ms = i64::MAX as u64;
- conf.burnchain.max_rbf = 10_000_000;
- conf.node.wait_time_for_blocks = 1_000;
-
- conf.miner.first_attempt_time_ms = i64::MAX as u64;
- conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
-
- conf.burnchain.epochs = Some(EpochList::new(&[
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch10,
- start_height: 0,
- end_height: 0,
- block_limit: BLOCK_LIMIT_MAINNET_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_1_0,
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch20,
- start_height: 0,
- end_height: 10_000,
- block_limit: ExecutionCost {
- write_length: 150000000,
- write_count: 50000,
- read_length: 1000000000,
- read_count: 5000, // make read_count smaller so we hit the read_count limit with a smaller tx.
- runtime: 100_000_000_000, - }, - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 10_000, - end_height: 10_002, - block_limit: BLOCK_LIMIT_MAINNET_205.clone(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 10_002, - end_height: 9223372036854775807, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ])); - conf.burnchain.pox_2_activation = Some(10_003); - - // included in the first block - let tx = make_contract_publish_microblock_only( - &spender_sk, - 0, - 555_000, - conf.burnchain.chain_id, - "over", - &oversize_contract_src, - ); - // contract limit hit; included in second block - let tx_2 = make_contract_publish_microblock_only( - &spender_sk, - 1, - 555_000, - conf.burnchain.chain_id, - "over-2", - &oversize_contract_src, - ); - // skipped over since contract limit was hit; included in second block - let tx_3 = make_contract_publish_microblock_only( - &second_spender_sk, - 0, - 150_000, - conf.burnchain.chain_id, - "max", - &max_contract_src, - ); - // included in first block - let tx_4 = make_stacks_transfer_mblock_only( - &third_spender_sk, - 0, - 180, - conf.burnchain.chain_id, - &PrincipalData::from(addr), - 100, - ); - - test_observer::spawn(); - test_observer::register_any(&mut conf); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed);
-
- // first block wakes up the run loop
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- // first block will hold our VRF registration
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- // second block will be the first mined Stacks block
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- // submit all the transactions
- let txid_1 = submit_tx(&http_origin, &tx);
- let txid_2 = submit_tx(&http_origin, &tx_2);
- let txid_3 = submit_tx(&http_origin, &tx_3);
- let txid_4 = submit_tx(&http_origin, &tx_4);
-
- eprintln!("transactions: {txid_1},{txid_2},{txid_3},{txid_4}");
-
- sleep_ms(50_000);
-
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sleep_ms(50_000);
-
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sleep_ms(50_000);
-
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sleep_ms(50_000);
-
- loop {
- let res = get_account(&http_origin, &addr);
- if res.nonce < 2 {
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sleep_ms(50_000);
- } else {
- break;
- }
- }
-
- let res = get_account(&http_origin, &addr);
- assert_eq!(res.nonce, 2);
-
- let res = get_account(&http_origin, &second_spender_addr);
- assert_eq!(res.nonce, 1);
-
- let res = get_account(&http_origin, &third_spender_addr);
- assert_eq!(res.nonce, 1);
-
- let mined_mblock_events = test_observer::get_microblocks();
- assert!(mined_mblock_events.len() >= 2);
-
- let tx_first_mblock = mined_mblock_events[0]
- .get("transactions")
- .unwrap()
- .as_array()
- .unwrap();
- assert_eq!(tx_first_mblock.len(), 2);
- let txid_1_exp = tx_first_mblock[0].get("txid").unwrap().as_str().unwrap();
- let txid_4_exp = tx_first_mblock[1].get("txid").unwrap().as_str().unwrap();
- assert_eq!(format!("0x{txid_1}"), txid_1_exp);
- assert_eq!(format!("0x{txid_4}"), txid_4_exp);
-
- let tx_second_mblock = mined_mblock_events[1]
- .get("transactions")
- .unwrap()
- .as_array()
- .unwrap();
- assert_eq!(tx_second_mblock.len(), 2);
- let txid_2_exp = tx_second_mblock[0].get("txid").unwrap().as_str().unwrap();
- let txid_3_exp = tx_second_mblock[1].get("txid").unwrap().as_str().unwrap();
- assert_eq!(format!("0x{txid_2}"), txid_2_exp);
- assert_eq!(format!("0x{txid_3}"), txid_3_exp);
-
- test_observer::clear();
- channel.stop_chains_coordinator();
-}
-
-#[test]
-#[ignore]
-fn block_large_tx_integration_test() {
- if env::var("BITCOIND_TEST") != Ok("1".into()) {
- return;
- }
-
- let small_contract_src = format!(
- "(define-public (f) (begin {} (ok 1))) (begin (f))",
- (0..700)
- .map(|_| format!(
- "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))",
- boot_code_id("cost-voting", false),
- boot_code_id("costs", false),
- boot_code_id("costs", false),
- ))
- .collect::<Vec<String>>()
- .join(" ")
- );
-
- let oversize_contract_src = format!(
- "(define-public (f) (begin {} (ok 1))) (begin (f))",
- (0..3500)
- .map(|_| format!(
- "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))",
- boot_code_id("cost-voting", false),
- boot_code_id("costs", false),
- boot_code_id("costs", false),
- ))
- .collect::<Vec<String>>()
- .join(" ")
- );
-
- let spender_sk = StacksPrivateKey::random();
- let spender_addr = to_addr(&spender_sk);
-
- let (mut conf, miner_account) = neon_integration_test_conf();
- test_observer::spawn();
- test_observer::register_any(&mut conf);
-
- conf.initial_balances.push(InitialBalance {
- address: spender_addr.into(),
- amount: 10000000,
- });
-
- conf.node.mine_microblocks = true;
- conf.node.wait_time_for_microblocks = 30000;
- conf.node.microblock_frequency = 1000;
-
- conf.miner.microblock_attempt_time_ms = i64::MAX as u64;
- conf.burnchain.max_rbf = 10_000_000;
- conf.node.wait_time_for_blocks = 1_000;
-
- conf.miner.first_attempt_time_ms = i64::MAX as u64;
- conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
-
- // higher fee for tx means it will get mined first
- let tx = make_contract_publish(
- &spender_sk,
- 0,
- 671_000,
- conf.burnchain.chain_id,
- "small",
- &small_contract_src,
- );
- let tx_2 = make_contract_publish(
- &spender_sk,
- 1,
- 670_000,
- conf.burnchain.chain_id,
- "over",
- &oversize_contract_src,
- );
-
- let mut btcd_controller = BitcoinCoreController::new(conf.clone());
- btcd_controller
- .start_bitcoind()
- .expect("Failed starting bitcoind");
-
- let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
- let http_origin = format!("http://{}", &conf.node.rpc_bind);
-
- btc_regtest_controller.bootstrap_chain(201);
-
- eprintln!("Chain bootstrapped...");
-
- let mut run_loop = neon::RunLoop::new(conf);
- let blocks_processed = run_loop.get_blocks_processed_arc();
-
- let channel = run_loop.get_coordinator_channel().unwrap();
-
- thread::spawn(move || run_loop.start(None, 0));
-
- // give the run loop some time to start up!
- wait_for_runloop(&blocks_processed);
-
- // first block wakes up the run loop
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- // first block will hold our VRF registration
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- // second block will be the first mined Stacks block
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- let account = get_account(&http_origin, &miner_account);
- assert_eq!(account.nonce, 1);
- assert_eq!(account.balance, 0);
-
- let account = get_account(&http_origin, &spender_addr);
- assert_eq!(account.nonce, 0);
- assert_eq!(account.balance, 10000000);
-
- let normal_txid = submit_tx(&http_origin, &tx);
- let huge_txid = submit_tx(&http_origin, &tx_2);
-
- eprintln!("Try to mine a too-big tx. Normal = {normal_txid}, TooBig = {huge_txid}");
- next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 1200);
-
- eprintln!("Finished trying to mine a too-big tx");
-
- let dropped_txs = test_observer::get_memtx_drops();
- assert_eq!(dropped_txs.len(), 1);
- assert_eq!(&dropped_txs[0].1, "TooExpensive");
- assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}"));
-
- test_observer::clear();
- channel.stop_chains_coordinator();
-}
-
-#[test]
-#[ignore]
-#[allow(non_snake_case)]
-fn microblock_large_tx_integration_test_FLAKY() {
- if env::var("BITCOIND_TEST") != Ok("1".into()) {
- return;
- }
-
- let small_contract_src = format!(
- "(define-public (f) (begin {} (ok 1))) (begin (f))",
- (0..700)
- .map(|_| format!(
- "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))",
- boot_code_id("cost-voting", false),
- boot_code_id("costs", false),
- boot_code_id("costs", false),
- ))
- .collect::<Vec<String>>()
- .join(" ")
- );
-
- // publishing this contract takes up >80% of the read_count budget (which is 50000)
- let oversize_contract_src = format!(
- "(define-public (f) (begin {} (ok 1))) (begin (f))",
- (0..3500)
- .map(|_| format!(
- "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))",
- boot_code_id("cost-voting", false),
- boot_code_id("costs", false),
- boot_code_id("costs", false),
- ))
- .collect::<Vec<String>>()
- .join(" ")
- );
-
- let spender_sk = StacksPrivateKey::random();
- let addr = to_addr(&spender_sk);
-
- let (mut conf, miner_account) = neon_integration_test_conf();
-
- test_observer::spawn();
- test_observer::register_any(&mut conf);
-
- conf.initial_balances.push(InitialBalance {
- address: addr.into(),
- amount: 10000000,
- });
-
- conf.node.mine_microblocks = true;
- conf.node.wait_time_for_microblocks = 30000;
- conf.node.microblock_frequency = 1000;
-
- conf.miner.first_attempt_time_ms = i64::MAX as u64;
- conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
-
- conf.miner.microblock_attempt_time_ms = 1_000;
- conf.node.wait_time_for_microblocks = 0;
- conf.burnchain.max_rbf = 10_000_000;
- conf.node.wait_time_for_blocks = 1_000;
-
- let tx = make_contract_publish_microblock_only(
- &spender_sk,
- 0,
- 150_000,
- conf.burnchain.chain_id,
- "small",
- &small_contract_src,
- );
- let tx_2 = make_contract_publish_microblock_only(
- &spender_sk,
- 1,
- 670_000,
- conf.burnchain.chain_id,
- "over",
- &oversize_contract_src,
- );
-
- let mut btcd_controller = BitcoinCoreController::new(conf.clone());
- btcd_controller
- .start_bitcoind()
- .expect("Failed starting bitcoind");
-
- let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
- let http_origin = format!("http://{}", &conf.node.rpc_bind);
-
- btc_regtest_controller.bootstrap_chain(201);
-
- eprintln!("Chain bootstrapped...");
-
- let mut run_loop = neon::RunLoop::new(conf);
- let blocks_processed = run_loop.get_blocks_processed_arc();
-
- let channel = run_loop.get_coordinator_channel().unwrap();
-
- thread::spawn(move || run_loop.start(None, 0));
-
- // give the run loop some time to start up!
- wait_for_runloop(&blocks_processed);
-
- // first block wakes up the run loop
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- // first block will hold our VRF registration
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- // second block will be the first mined Stacks block
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- let account = get_account(&http_origin, &miner_account);
- assert_eq!(account.nonce, 1);
- assert_eq!(account.balance, 0);
-
- let account = get_account(&http_origin, &addr);
- assert_eq!(account.nonce, 0);
- assert_eq!(account.balance, 10000000);
-
- submit_tx(&http_origin, &tx);
- let huge_txid = submit_tx(&http_origin, &tx_2);
-
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sleep_ms(20_000);
-
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
- // Check that the microblock contains the first tx. 
- let microblock_events = test_observer::get_microblocks(); - assert!(!microblock_events.is_empty()); - - let microblock = microblock_events[0].clone(); - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); - assert_eq!(transactions.len(), 1); - let status = transactions[0].get("status").unwrap().as_str().unwrap(); - assert_eq!(status, "success"); - - // Check that the tx that triggered TransactionTooLargeError when being processed is dropped - // from the mempool. - let dropped_txs = test_observer::get_memtx_drops(); - assert_eq!(dropped_txs.len(), 1); - assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[test] -#[ignore] -fn pox_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let spender_sk = StacksPrivateKey::random(); - let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - - let spender_2_sk = StacksPrivateKey::random(); - let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); - - let spender_3_sk = StacksPrivateKey::random(); - let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); - - let pox_pubkey = Secp256k1PublicKey::from_hex( - "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", - ) - .unwrap(); - let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); - - let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::random()); - let pox_2_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_2_pubkey).to_bytes()); - - let pox_2_address = BitcoinAddress::from_bytes_legacy( - BitcoinNetworkType::Testnet, - LegacyBitcoinAddressType::PublicKeyHash, - &Hash160::from_node_public_key(&pox_2_pubkey).to_bytes(), - ) - .unwrap(); - - let (mut conf, miner_account) = neon_integration_test_conf(); - - test_observer::spawn(); - test_observer::register_any(&mut conf); - - // required for testing post-sunset behavior - conf.node.always_use_affirmation_maps = false; - - let first_bal = 6_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); - let second_bal = 2_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); - let third_bal = 2_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); - let stacked_bal = 1_000_000_000 * u128::from(core::MICROSTACKS_PER_STACKS); - - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: first_bal, - }); - - conf.initial_balances.push(InitialBalance { - address: spender_2_addr, - amount: second_bal, - }); - - conf.initial_balances.push(InitialBalance { - address: spender_3_addr, - amount: third_bal, - }); - - conf.miner.microblock_attempt_time_ms = 1_000; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 1_000; - conf.burnchain.max_rbf = 10_000_000; - conf.node.wait_time_for_blocks = 1_000; - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); - - // reward cycle length = 15, so 10 reward cycle slots + 5 prepare-phase burns - let reward_cycle_len = 15; - let prepare_phase_len = 5; - let pox_constants = PoxConstants::new( - reward_cycle_len, - prepare_phase_len, - 4 * prepare_phase_len / 5, - 5, - 15, - (16 * reward_cycle_len - 1).into(), - (17 * reward_cycle_len).into(), - u32::MAX, - u32::MAX, - u32::MAX, - u32::MAX, - ); - 
burnchain_config.pox_constants = pox_constants.clone(); - - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - let burnchain = burnchain_config.clone(); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let sort_height = channel.get_sortitions_processed(); - - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.balance, 0); - assert_eq!(account.nonce, 1); - - // and our potential spenders: - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.balance, first_bal as u128); - assert_eq!(account.nonce, 0); - - let pox_info = get_pox_info(&http_origin).unwrap(); - - assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); - assert_eq!(pox_info.first_burnchain_block_height, 0); - assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.stacked_ustx, 0); - assert!(!pox_info.current_cycle.is_pox_active); - assert_eq!(pox_info.next_cycle.stacked_ustx, 0); - assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); - assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 210); - assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 205); - assert_eq!(pox_info.next_cycle.min_increment_ustx, 1250710410920); - assert_eq!( - pox_info.prepare_cycle_length as u32, - pox_constants.prepare_length - ); - assert_eq!( - pox_info.rejection_fraction, - Some(pox_constants.pox_rejection_fraction) - ); - let reward_cycle = burnchain - .block_height_to_reward_cycle(sort_height) - .expect("Expected to be able to get reward cycle"); - assert_eq!(pox_info.reward_cycle_id, reward_cycle); - assert_eq!(pox_info.current_cycle.id, reward_cycle); - assert_eq!(pox_info.next_cycle.id, reward_cycle + 1); - assert_eq!( - pox_info.reward_cycle_length as u32, - pox_constants.reward_cycle_length - ); - assert_eq!(pox_info.total_liquid_supply_ustx, 10005683287360023); - assert_eq!(pox_info.next_reward_cycle_in, 6); - - let tx = make_contract_call( - &spender_sk, - 0, - 260, - conf.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "pox", - "stack-stx", - &[ - Value::UInt(stacked_bal), - execute( - &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), - ClarityVersion::Clarity1, - ) - .unwrap() - .unwrap(), - Value::UInt(sort_height as u128), - Value::UInt(6), - ], - ); - - // okay, let's push that stacking transaction! 
- submit_tx(&http_origin, &tx); - - let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - test_observer::clear(); - - // now let's mine until the next reward cycle starts ... - while sort_height < ((14 * pox_constants.reward_cycle_length) + 1).into() { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - let pox_info = get_pox_info(&http_origin).unwrap(); - let reward_cycle = burnchain - .block_height_to_reward_cycle(sort_height) - .expect("Expected to be able to get reward cycle"); - - assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); - assert_eq!(pox_info.first_burnchain_block_height, 0); - assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); - assert!(pox_info.pox_activation_threshold_ustx > 1500000000000000); - assert!(!pox_info.current_cycle.is_pox_active); - assert_eq!(pox_info.next_cycle.stacked_ustx, 1000000000000000); - assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); - assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); - assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 220); - assert_eq!( - pox_info.prepare_cycle_length as u32, - pox_constants.prepare_length - ); - assert_eq!( - pox_info.rejection_fraction, - Some(pox_constants.pox_rejection_fraction) - ); - assert_eq!(pox_info.reward_cycle_id, reward_cycle); - assert_eq!(pox_info.current_cycle.id, reward_cycle); - assert_eq!(pox_info.next_cycle.id, reward_cycle + 1); - assert_eq!( - pox_info.reward_cycle_length as u32, - pox_constants.reward_cycle_length - ); - assert_eq!(pox_info.next_reward_cycle_in, 14); - - let blocks_observed = test_observer::get_blocks(); - assert!( - blocks_observed.len() >= 2, - "Blocks observed {} should be >= 2", - blocks_observed.len() - ); - - // look up the return value of our stacking operation... - let mut tested = false; - for block in blocks_observed.iter() { - if tested { - break; - } - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::ContractCall(contract_call) = parsed.payload { - eprintln!("{}", contract_call.function_name.as_str()); - if contract_call.function_name.as_str() == "stack-stx" { - let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); - // should unlock at height 300 (we're in reward cycle 13, lockup starts in reward cycle - // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward - // cycle length of 15 blocks, is a burnchain height of 300) - assert_eq!(parsed.to_string(), - format!("(ok (tuple (lock-amount u1000000000000000) (stacker {spender_addr}) (unlock-burn-height u300)))")); - tested = true; - } - } - } - } - - assert!(tested, "Should have observed stack-stx transaction"); - - // let's stack with spender 2 and spender 3... 
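The stack-stx result checked above, and the prepare/reward-phase heights asserted around it, are plain reward-cycle arithmetic given the PoxConstants this test builds (15-block cycles, 5-block prepare phase, first burn block at height 0). A small illustrative sketch of that arithmetic; the helper names are mine, not the node's API:

```rust
/// Illustrative reward-cycle arithmetic for this regtest setup:
/// 15-block cycles, 5-block prepare phase, first burn block at height 0.
const CYCLE_LEN: u64 = 15;
const PREPARE_LEN: u64 = 5;

/// Cycle `n` starts at burn height n * CYCLE_LEN; its prepare phase is the
/// last PREPARE_LEN blocks of the previous cycle.
fn prepare_phase_start(cycle: u64) -> u64 {
    cycle * CYCLE_LEN - PREPARE_LEN
}

/// stack-stx locks from the start of `first_cycle` for `lock_period`
/// reward cycles, so funds unlock at the first burn block after those cycles.
fn unlock_burn_height(first_cycle: u64, lock_period: u64) -> u64 {
    (first_cycle + lock_period) * CYCLE_LEN
}

fn main() {
    // Bootstrapping 201 burn blocks puts the chain in cycle 13 (201 / 15),
    // so cycle 14 rewards from height 210 with its prepare phase at 205,
    // matching the pox_info assertions earlier in the test.
    assert_eq!(201 / CYCLE_LEN, 13);
    assert_eq!(14 * CYCLE_LEN, 210);
    assert_eq!(prepare_phase_start(14), 205);

    // Stacked during cycle 13, the lockup takes effect in cycle 14; with a
    // 6-cycle lock, unlock-burn-height is u300, as the raw_result expects.
    assert_eq!(unlock_burn_height(14, 6), 300);
}
```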
- - // now let's have sender_2 and sender_3 stack to pox spender_addr 2 in - // two different txs, and make sure that they sum together in the reward set. - - let tx = make_contract_call( - &spender_2_sk, - 0, - 260, - conf.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "pox", - "stack-stx", - &[ - Value::UInt(stacked_bal / 2), - execute( - &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), - ClarityVersion::Clarity1, - ) - .unwrap() - .unwrap(), - Value::UInt(sort_height as u128), - Value::UInt(6), - ], - ); - - // okay, let's push that stacking transaction! - submit_tx(&http_origin, &tx); - - let tx = make_contract_call( - &spender_3_sk, - 0, - 260, - conf.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "pox", - "stack-stx", - &[ - Value::UInt(stacked_bal / 2), - execute( - &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), - ClarityVersion::Clarity1, - ) - .unwrap() - .unwrap(), - Value::UInt(sort_height as u128), - Value::UInt(6), - ], - ); - - submit_tx(&http_origin, &tx); - - // mine until the end of the current reward cycle. - sort_height = channel.get_sortitions_processed(); - while sort_height < ((15 * pox_constants.reward_cycle_length) - 1).into() { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - let pox_info = get_pox_info(&http_origin).unwrap(); - - assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); - assert_eq!(pox_info.first_burnchain_block_height, 0); - assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); - assert!(!pox_info.current_cycle.is_pox_active); - assert_eq!(pox_info.next_cycle.stacked_ustx, 2000000000000000); - assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); - assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); - assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 220); - assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); - assert_eq!( - pox_info.prepare_cycle_length as u32, - pox_constants.prepare_length - ); - assert_eq!( - pox_info.rejection_fraction, - Some(pox_constants.pox_rejection_fraction) - ); - assert_eq!(pox_info.reward_cycle_id, 14); - assert_eq!(pox_info.current_cycle.id, 14); - assert_eq!(pox_info.next_cycle.id, 15); - assert_eq!( - pox_info.reward_cycle_length as u32, - pox_constants.reward_cycle_length - ); - assert_eq!(pox_info.next_reward_cycle_in, 1); - - // we should have received _no_ Bitcoin commitments, because the pox participation threshold - // was not met! - let utxos = btc_regtest_controller.get_all_utxos(&pox_pubkey); - eprintln!("Got UTXOs: {}", utxos.len()); - assert_eq!( - utxos.len(), - 0, - "Should have received no outputs during PoX reward cycle" - ); - - // let's test the reward information in the observer - test_observer::clear(); - - // before sunset - // mine until the end of the next reward cycle, - // the participation threshold now should be met. 
- while sort_height < ((16 * pox_constants.reward_cycle_length) - 1).into() {
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sort_height = channel.get_sortitions_processed();
- eprintln!("Sort height: {sort_height}");
- }
-
- let pox_info = get_pox_info(&http_origin).unwrap();
-
- assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox");
- assert_eq!(pox_info.first_burnchain_block_height, 0);
- assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000);
- assert_eq!(pox_info.current_cycle.stacked_ustx, 2000000000000000);
- assert!(pox_info.current_cycle.is_pox_active);
- assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 240);
- assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 235);
- assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4);
- assert_eq!(pox_info.next_reward_cycle_in, 1);
-
- // we should have received _seven_ Bitcoin commitments, because our commitment was 7 * threshold
- let utxos = btc_regtest_controller.get_all_utxos(&pox_pubkey);
-
- eprintln!("Got UTXOs: {}", utxos.len());
- assert_eq!(
- utxos.len(),
- 7,
- "Should have received outputs during PoX reward cycle"
- );
-
- // we should have received _seven_ Bitcoin commitments to pox_2_pubkey, because our commitment was 7 * threshold
- // note that if the reward set "summing" isn't implemented, this recipient would only have received _6_ slots,
- // because each `stack-stx` call only received enough to get 3 slots individually.
- let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey);
-
- eprintln!("Got UTXOs: {}", utxos.len());
- assert_eq!(
- utxos.len(),
- 7,
- "Should have received outputs during PoX reward cycle"
- );
-
- let burn_blocks = test_observer::get_burn_blocks();
- let mut recipient_slots: HashMap<String, u64> = HashMap::new();
-
- for block in burn_blocks.iter() {
- let reward_slot_holders = block
- .get("reward_slot_holders")
- .unwrap()
- .as_array()
- .unwrap()
- .iter()
- .map(|x| x.as_str().unwrap().to_string());
- for holder in reward_slot_holders {
- if let Some(current) = recipient_slots.get_mut(&holder) {
- *current += 1;
- } else {
- recipient_slots.insert(holder, 1);
- }
- }
- }
-
- let pox_1_address = BitcoinAddress::from_bytes_legacy(
- BitcoinNetworkType::Testnet,
- LegacyBitcoinAddressType::PublicKeyHash,
- &Hash160::from_node_public_key(&pox_pubkey).to_bytes(),
- )
- .unwrap();
-
- assert_eq!(recipient_slots.len(), 2);
- assert_eq!(
- recipient_slots.get(&format!("{pox_2_address}")).cloned(),
- Some(7u64)
- );
- assert_eq!(
- recipient_slots.get(&format!("{pox_1_address}")).cloned(),
- Some(7u64)
- );
-
- // get the canonical chain tip
- let tip_info = get_chain_info(&conf);
-
- eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height);
- assert_eq!(tip_info.stacks_tip_height, 36);
-
- // now let's mine into the sunset
- while sort_height < ((17 * pox_constants.reward_cycle_length) - 1).into() {
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sort_height = channel.get_sortitions_processed();
- eprintln!("Sort height: {sort_height}");
- }
-
- // get the canonical chain tip
- let tip_info = get_chain_info(&conf);
-
- eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height);
- assert_eq!(tip_info.stacks_tip_height, 51);
-
- let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey);
-
- // should receive more rewards during this cycle... 
- eprintln!("Got UTXOs: {}", utxos.len()); - assert_eq!( - utxos.len(), - 14, - "Should have received more outputs during the sunsetting PoX reward cycle" - ); - - // and after sunset - while sort_height < ((18 * pox_constants.reward_cycle_length) - 1).into() { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); - - // should *not* receive more rewards during the after sunset cycle... - eprintln!("Got UTXOs: {}", utxos.len()); - assert_eq!( - utxos.len(), - 14, - "Should have received no more outputs after sunset PoX reward cycle" - ); - - // should have progressed the chain, though! - // get the canonical chain tip - let tip_info = get_chain_info(&conf); - - eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height); - assert_eq!(tip_info.stacks_tip_height, 66); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[derive(Debug)] -enum Signal { - BootstrapNodeReady, - FollowerNodeReady, - ReplicatingAttachmentsStartTest1, - ReplicatingAttachmentsCheckTest1(u64), - ReplicatingAttachmentsStartTest2, - ReplicatingAttachmentsCheckTest2(u64), -} - -#[test] -#[ignore] -fn atlas_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let user_1 = StacksPrivateKey::random(); - let initial_balance_user_1 = InitialBalance { - address: to_addr(&user_1).into(), - amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), - }; - - // Prepare the config of the bootstrap node - let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); - let bootstrap_node_public_key = { - let keychain = Keychain::default(conf_bootstrap_node.node.seed.clone()); - let mut pk = keychain.generate_op_signer().get_public_key(); - pk.set_compressed(true); - pk.to_hex() - }; - conf_bootstrap_node - .initial_balances - .push(initial_balance_user_1.clone()); - - conf_bootstrap_node.node.always_use_affirmation_maps = false; - - // Prepare the config of the follower node - let (mut conf_follower_node, _) = neon_integration_test_conf(); - let bootstrap_node_url = format!( - "{}@{}", - bootstrap_node_public_key, conf_bootstrap_node.node.p2p_bind - ); - conf_follower_node.node.set_bootstrap_nodes( - bootstrap_node_url, - conf_follower_node.burnchain.chain_id, - conf_follower_node.burnchain.peer_version, - ); - conf_follower_node.node.miner = false; - conf_follower_node - .initial_balances - .push(initial_balance_user_1.clone()); - conf_follower_node - .events_observers - .insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - disable_retries: false, - }); - - conf_follower_node.node.always_use_affirmation_maps = false; - - // Our 2 nodes will share the bitcoind node - let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let (bootstrap_node_tx, bootstrap_node_rx) = mpsc::channel(); - let (follower_node_tx, follower_node_rx) = mpsc::channel(); - - let bootstrap_node_thread = thread::spawn(move || { - let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); - - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_bootstrap_node.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = 
format!("http://{}", &conf_bootstrap_node.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Let's setup the follower now. - follower_node_tx - .send(Signal::BootstrapNodeReady) - .expect("Unable to send signal"); - - match bootstrap_node_rx.recv() { - Ok(Signal::ReplicatingAttachmentsStartTest1) => { - println!("Follower node is ready..."); - } - _ => panic!("Bootstrap node could nod boot. Aborting test."), - }; - - // Let's publish a (1) namespace-preorder, (2) namespace-reveal and (3) name-import in this mempool - - // (define-public (namespace-preorder (hashed-salted-namespace (buff 20)) - // (stx-to-burn uint)) - let namespace = "passport"; - let salt = "some-salt"; - let salted_namespace = format!("{namespace}{salt}"); - let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); - let tx_1 = make_contract_call( - &user_1, - 0, - 260, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-preorder", - &[ - Value::buff_from(hashed_namespace.to_bytes().to_vec()).unwrap(), - Value::UInt(1000000000), - ], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_1.clone()) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx_1[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - // (define-public (namespace-reveal (namespace (buff 20)) - // (namespace-salt (buff 20)) - // (p-func-base uint) - // (p-func-coeff uint) - // (p-func-b1 uint) - // (p-func-b2 uint) - // (p-func-b3 uint) - // (p-func-b4 uint) - // (p-func-b5 uint) - // (p-func-b6 uint) - // (p-func-b7 uint) - // (p-func-b8 uint) - // (p-func-b9 uint) - // (p-func-b10 uint) - // (p-func-b11 uint) - // (p-func-b12 uint) - // (p-func-b13 uint) - // (p-func-b14 uint) - // (p-func-b15 uint) - // (p-func-b16 uint) - // (p-func-non-alpha-discount uint) - // (p-func-no-vowel-discount uint) - // (lifetime uint) - // (namespace-import principal)) - let tx_2 = make_contract_call( - &user_1, - 1, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-reveal", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(salt.as_bytes().to_vec()).unwrap(), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - 
Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1),
- Value::UInt(1000),
- Value::Principal(initial_balance_user_1.address.clone()),
- ],
- );
-
- let path = format!("{http_origin}/v2/transactions");
- let res = client
- .post(&path)
- .header("Content-Type", "application/octet-stream")
- .body(tx_2.clone())
- .send()
- .unwrap();
- eprintln!("{res:#?}");
- if res.status().is_success() {
- let res: String = res.json().unwrap();
- assert_eq!(
- res,
- StacksTransaction::consensus_deserialize(&mut &tx_2[..])
- .unwrap()
- .txid()
- .to_string()
- );
- } else {
- eprintln!("{}", res.text().unwrap());
- panic!("");
- }
-
- // (define-public (name-import (namespace (buff 20))
- // (name (buff 48))
- // (zonefile-hash (buff 20)))
- let zonefile_hex = "facade00";
- let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap());
- let tx_3 = make_contract_call(
- &user_1,
- 2,
- 500,
- conf_bootstrap_node.burnchain.chain_id,
- &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
- "bns",
- "name-import",
- &[
- Value::buff_from(namespace.as_bytes().to_vec()).unwrap(),
- Value::buff_from("johndoe".as_bytes().to_vec()).unwrap(),
- Value::Principal(to_addr(&user_1).into()),
- Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(),
- ],
- );
-
- let body = {
- let content = PostTransactionRequestBody {
- tx: bytes_to_hex(&tx_3),
- attachment: Some(zonefile_hex.to_string()),
- };
- serde_json::to_vec(&json!(content)).unwrap()
- };
-
- let path = format!("{http_origin}/v2/transactions");
- let res = client
- .post(&path)
- .header("Content-Type", "application/json")
- .body(body)
- .send()
- .unwrap();
- eprintln!("{res:#?}");
- if !res.status().is_success() {
- eprintln!("{}", res.text().unwrap());
- panic!("");
- }
-
- // From there, let's mine these transactions, and build more blocks.
- let mut sort_height = channel.get_sortitions_processed();
- let few_blocks = sort_height + 10;
-
- while sort_height < few_blocks {
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sort_height = channel.get_sortitions_processed();
- eprintln!("Sort height: {sort_height}");
- }
-
- // Then check that the follower is correctly replicating the attachment
- follower_node_tx
- .send(Signal::ReplicatingAttachmentsCheckTest1(sort_height))
- .expect("Unable to send signal");
-
- match bootstrap_node_rx.recv() {
- Ok(Signal::ReplicatingAttachmentsStartTest2) => {
- println!("Follower node is ready...");
- }
- _ => panic!("Bootstrap node could not boot. Aborting test."),
- };
-
- // From there, let's mine these transactions, and build more blocks. 
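One note before the second mining run below: every attachment poll in this test addresses an attachment by the Hash160 of its raw content, which is how the GET v2/attachments URLs are formed. A small self-contained sketch of that address derivation, reusing the hashing helpers this file works with (the exact crate path in the `use` line is an assumption):

```rust
// Assumed import path for the helpers the test code calls as
// `hex_bytes` and `Hash160::from_data`.
use stacks_common::util::hash::{hex_bytes, Hash160};

/// Build the URL a node polls for an attachment: the key is the Hash160
/// of the attachment's raw bytes, not of its hex encoding.
fn attachment_path(http_origin: &str, zonefile_hex: &str) -> String {
    let content = hex_bytes(zonefile_hex).expect("valid hex");
    let content_hash = Hash160::from_data(&content);
    format!("{http_origin}/v2/attachments/{}", content_hash.to_hex())
}

fn main() {
    // The bootstrap node attached "facade00"; the follower polls this URL
    // until a GET returns 200 with the same content.
    println!("{}", attachment_path("http://127.0.0.1:20443", "facade00"));
}
```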
- let mut sort_height = channel.get_sortitions_processed();
- let few_blocks = sort_height + 10;
-
- while sort_height < few_blocks {
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sort_height = channel.get_sortitions_processed();
- eprintln!("Sort height: {sort_height}");
- }
-
- // Poll GET v2/attachments/
- for i in 1..10 {
- let mut attachments_did_sync = false;
- let mut timeout = 60;
- while !attachments_did_sync {
- let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap();
- let hashed_zonefile = Hash160::from_data(&zonefile_hex);
- let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex());
- let res = client
- .get(&path)
- .header("Content-Type", "application/json")
- .send()
- .unwrap();
- eprintln!("{res:#?}");
- if res.status().is_success() {
- let attachment_response: GetAttachmentResponse = res.json().unwrap();
- assert_eq!(attachment_response.attachment.content, zonefile_hex);
- attachments_did_sync = true;
- } else {
- timeout -= 1;
- if timeout == 0 {
- panic!("Failed syncing 9 attachments between 2 neon runloops within 60s (failed at {}) - Something is wrong", &to_hex(&zonefile_hex));
- }
- eprintln!("Attachment {} not sync'd yet", bytes_to_hex(&zonefile_hex));
- thread::sleep(Duration::from_millis(1000));
- }
- }
- }
-
- // Then check that the follower is correctly replicating the attachment
- follower_node_tx
- .send(Signal::ReplicatingAttachmentsCheckTest2(sort_height))
- .expect("Unable to send signal");
-
- channel.stop_chains_coordinator();
- });
-
- // Start the attached observer
- test_observer::spawn();
-
- // The bootstrap node mined a few blocks and is ready, let's setup this node.
- match follower_node_rx.recv() {
- Ok(Signal::BootstrapNodeReady) => {
- println!("Booting follower node...");
- }
- _ => panic!("Bootstrap node could not boot. Aborting test."),
- };
-
- let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path());
- let chain_id = conf_follower_node.burnchain.chain_id;
- let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind);
-
- eprintln!("Chain bootstrapped...");
-
- let mut run_loop = neon::RunLoop::new(conf_follower_node.clone());
- let blocks_processed = run_loop.get_blocks_processed_arc();
- let client = reqwest::blocking::Client::new();
- let channel = run_loop.get_coordinator_channel().unwrap();
-
- thread::spawn(move || run_loop.start(Some(burnchain_config), 0));
-
- // give the run loop some time to start up!
- wait_for_runloop(&blocks_processed);
-
- // Follower node is ready, the bootstrap node will now hand over
- bootstrap_node_tx
- .send(Signal::ReplicatingAttachmentsStartTest1)
- .expect("Unable to send signal");
-
- // The bootstrap node published and mined a transaction that includes an attachment.
- // Let's observe the attachment replication kicking in.
- let target_height = match follower_node_rx.recv() {
- Ok(Signal::ReplicatingAttachmentsCheckTest1(target_height)) => target_height,
- _ => panic!("Bootstrap node could not boot. 
Aborting test."), - }; - - let mut sort_height = channel.get_sortitions_processed(); - while sort_height < target_height { - wait_for_runloop(&blocks_processed); - sort_height = channel.get_sortitions_processed(); - } - - // Now wait for the node to sync the attachment - let mut attachments_did_sync = false; - let mut timeout = 60; - while !attachments_did_sync { - let zonefile_hex = "facade00"; - let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); - let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); - let res = client - .get(&path) - .header("Content-Type", "application/json") - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - eprintln!("Success syncing attachment - {}", res.text().unwrap()); - attachments_did_sync = true; - } else { - timeout -= 1; - if timeout == 0 { - panic!("Failed syncing 1 attachments between 2 neon runloops within 60s - Something is wrong"); - } - eprintln!("Attachment {zonefile_hex} not sync'd yet"); - thread::sleep(Duration::from_millis(1000)); - } - } - - // Test 2: 9 transactions are posted to the follower. - // We want to make sure that the miner is able to - // 1) mine these transactions - // 2) retrieve the attachments staged on the follower node. - // 3) ensure that the follower is also instantiating the attachments after - // executing the transactions, once mined. - let namespace = "passport"; - for i in 1..10 { - let user = StacksPrivateKey::random(); - let zonefile_hex = format!("facade0{i}"); - let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - let name = format!("johndoe{i}"); - let tx = make_contract_call( - &user_1, - 2 + i, - 500, - chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "name-import", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(name.as_bytes().to_vec()).unwrap(), - Value::Principal(to_addr(&user).into()), - Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), - ], - ); - - let body = { - let content = PostTransactionRequestBody { - tx: bytes_to_hex(&tx), - attachment: Some(zonefile_hex.to_string()), - }; - serde_json::to_vec(&json!(content)).unwrap() - }; - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/json") - .body(body) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - } - - bootstrap_node_tx - .send(Signal::ReplicatingAttachmentsStartTest2) - .expect("Unable to send signal"); - - let target_height = match follower_node_rx.recv() { - Ok(Signal::ReplicatingAttachmentsCheckTest2(target_height)) => target_height, - _ => panic!("Bootstrap node could not boot. 
Aborting test."), - }; - - let mut sort_height = channel.get_sortitions_processed(); - while sort_height < target_height { - wait_for_runloop(&blocks_processed); - sort_height = channel.get_sortitions_processed(); - } - - // Poll GET v2/attachments/ - for i in 1..10 { - let mut attachments_did_sync = false; - let mut timeout = 60; - while !attachments_did_sync { - let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); - let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); - let res = client - .get(&path) - .header("Content-Type", "application/json") - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let attachment_response: GetAttachmentResponse = res.json().unwrap(); - assert_eq!(attachment_response.attachment.content, zonefile_hex); - attachments_did_sync = true; - } else { - timeout -= 1; - if timeout == 0 { - panic!("Failed syncing 9 attachments between 2 neon runloops within 60s (failed at {}) - Something is wrong", &to_hex(&zonefile_hex)); - } - eprintln!("Attachment {} not sync'd yet", bytes_to_hex(&zonefile_hex)); - thread::sleep(Duration::from_millis(1000)); - } - } - } - - // Ensure that we the attached sidecar was able to receive a total of 10 attachments - // This last assertion is flacky for some reason, it does not worth bullying the CI or disabling this whole test - // We're using an inequality as a best effort, to make sure that **some** attachments were received. - assert!(!test_observer::get_attachments().is_empty()); - test_observer::clear(); - channel.stop_chains_coordinator(); - - bootstrap_node_thread.join().unwrap(); -} - -#[test] -#[ignore] -fn antientropy_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let user_1 = StacksPrivateKey::random(); - let initial_balance_user_1 = InitialBalance { - address: to_addr(&user_1).into(), - amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), - }; - - // Prepare the config of the bootstrap node - let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); - let bootstrap_node_public_key = { - let keychain = Keychain::default(conf_bootstrap_node.node.seed.clone()); - let mut pk = keychain.generate_op_signer().get_public_key(); - pk.set_compressed(true); - pk.to_hex() - }; - conf_bootstrap_node - .initial_balances - .push(initial_balance_user_1.clone()); - conf_bootstrap_node.connection_options.antientropy_retry = 10; // move this along -- do anti-entropy protocol once every 10 seconds - conf_bootstrap_node.connection_options.antientropy_public = true; // always push blocks, even if we're not NAT'ed - conf_bootstrap_node.connection_options.max_block_push = 1000; - conf_bootstrap_node.connection_options.max_microblock_push = 1000; - - conf_bootstrap_node.node.mine_microblocks = true; - conf_bootstrap_node.miner.microblock_attempt_time_ms = 2_000; - conf_bootstrap_node.node.wait_time_for_microblocks = 0; - conf_bootstrap_node.node.microblock_frequency = 0; - conf_bootstrap_node.miner.first_attempt_time_ms = 1_000_000; - conf_bootstrap_node.miner.subsequent_attempt_time_ms = 1_000_000; - conf_bootstrap_node.burnchain.max_rbf = 1000000; - conf_bootstrap_node.node.wait_time_for_blocks = 1_000; - - conf_bootstrap_node.node.always_use_affirmation_maps = false; - - // Prepare the config of the follower node - let (mut conf_follower_node, _) = neon_integration_test_conf(); - let bootstrap_node_url = format!( - "{bootstrap_node_public_key}@{}", - 
conf_bootstrap_node.node.p2p_bind - ); - conf_follower_node.connection_options.disable_block_download = true; - conf_follower_node.node.set_bootstrap_nodes( - bootstrap_node_url, - conf_follower_node.burnchain.chain_id, - conf_follower_node.burnchain.peer_version, - ); - conf_follower_node.node.miner = false; - conf_follower_node - .initial_balances - .push(initial_balance_user_1); - conf_follower_node - .events_observers - .insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - disable_retries: false, - }); - - conf_follower_node.node.mine_microblocks = true; - conf_follower_node.miner.microblock_attempt_time_ms = 2_000; - conf_follower_node.node.wait_time_for_microblocks = 0; - conf_follower_node.node.microblock_frequency = 0; - conf_follower_node.miner.first_attempt_time_ms = 1_000_000; - conf_follower_node.miner.subsequent_attempt_time_ms = 1_000_000; - conf_follower_node.burnchain.max_rbf = 1000000; - conf_follower_node.node.wait_time_for_blocks = 1_000; - - conf_follower_node.node.always_use_affirmation_maps = false; - - // Our 2 nodes will share the bitcoind node - let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let (bootstrap_node_tx, bootstrap_node_rx) = mpsc::channel(); - let (follower_node_tx, follower_node_rx) = mpsc::channel(); - - let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); - let target_height = 3 + (3 * burnchain_config.pox_constants.reward_cycle_length); - - let bootstrap_node_thread = thread::spawn(move || { - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_bootstrap_node.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - for i in 0..(target_height - 3) { - eprintln!("Mine block {i}"); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - // Let's setup the follower now. - follower_node_tx - .send(Signal::BootstrapNodeReady) - .expect("Unable to send signal"); - - eprintln!("Bootstrap node informed follower that it's ready; waiting for acknowledgement"); - - // wait for bootstrap node to terminate - match bootstrap_node_rx.recv() { - Ok(Signal::FollowerNodeReady) => { - println!("Follower has finished"); - } - Ok(x) => { - panic!("Follower gave a bad signal: {x:?}"); - } - Err(e) => { - panic!("Failed to recv: {e:?}"); - } - }; - - channel.stop_chains_coordinator(); - }); - - // Start the attached observer - test_observer::spawn(); - - // The bootstrap node mined a few blocks and is ready, let's setup this node. 
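For context on the channel handshake used throughout these two-node tests: the `Signal` enum that `bootstrap_node_tx`/`follower_node_rx` carry is defined elsewhere in this test module. A minimal sketch of the shape implied by the sends and receives above (variant names are taken from those call sites; the derive and the meaning of the `u64` payloads as sortition heights are assumptions):

```rust
// Sketch only -- the real enum lives elsewhere in this test module.
// The u64 payloads are the sortition heights at which the peer should
// start its replication checks, per the call sites above.
#[derive(Debug)]
enum Signal {
    BootstrapNodeReady,
    FollowerNodeReady,
    ReplicatingAttachmentsStartTest1,
    ReplicatingAttachmentsStartTest2,
    ReplicatingAttachmentsCheckTest1(u64),
    ReplicatingAttachmentsCheckTest2(u64),
}
```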
- match follower_node_rx.recv() { - Ok(Signal::BootstrapNodeReady) => { - println!("Booting follower node..."); - } - _ => panic!("Bootstrap node could not boot. Aborting test."), - }; - - let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path()); - let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf_follower_node.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - let thread_burnchain_config = burnchain_config.clone(); - thread::spawn(move || run_loop.start(Some(thread_burnchain_config), 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - let mut sort_height = channel.get_sortitions_processed(); - while sort_height < (target_height + 200) as u64 { - eprintln!( - "Follower sortition is {sort_height}, target is {}", - target_height + 200 - ); - wait_for_runloop(&blocks_processed); - sort_height = channel.get_sortitions_processed(); - sleep_ms(1000); - } - - eprintln!("Follower booted up; waiting for blocks"); - - // wait for block height to reach target - let mut tip_height = get_chain_tip_height(&http_origin); - eprintln!( - "Follower Stacks tip height is {tip_height}, wait until {tip_height} >= {target_height} - 3" - ); - - let btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_follower_node.clone(), - None, - Some(burnchain_config), - None, - ); - - let mut burnchain_deadline = get_epoch_time_secs() + 60; - while tip_height < (target_height - 3) as u64 { - sleep_ms(1000); - tip_height = get_chain_tip_height(&http_origin); - - eprintln!("Follower Stacks tip height is {tip_height}"); - - if burnchain_deadline < get_epoch_time_secs() { - burnchain_deadline = get_epoch_time_secs() + 60; - btc_regtest_controller.build_next_block(1); - } - } - - bootstrap_node_tx - .send(Signal::FollowerNodeReady) - .expect("Unable to send signal"); - bootstrap_node_thread.join().unwrap(); - - eprintln!("Follower node finished"); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[allow(clippy::too_many_arguments)] -fn wait_for_mined( - btc_regtest_controller: &mut BitcoinRegtestController, - blocks_processed: &Arc<AtomicU64>, - http_origin: &str, - users: &[StacksPrivateKey], - account_before_nonces: &[u64], - batch_size: usize, - batches: usize, - index_block_hashes: &mut Vec<StacksBlockId>, -) { - let mut all_mined_vec = vec![false; batches * batch_size]; - let mut account_after_nonces = vec![0; batches * batch_size]; - let mut all_mined = false; - for _k in 0..10 { - next_block_and_wait(btc_regtest_controller, blocks_processed); - sleep_ms(10_000); - - let (ch, bhh) = get_chain_tip(http_origin); - let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - - if let Some(last_ibh) = index_block_hashes.last() { - if *last_ibh != ibh { - index_block_hashes.push(ibh); - eprintln!("Tip is now {ibh}"); - } - } - - for j in 0..batches * batch_size { - let account_after = get_account(http_origin, &to_addr(&users[j])); - let account_after_nonce = account_after.nonce; - account_after_nonces[j] = account_after_nonce; - - if account_before_nonces[j] < account_after_nonce { - all_mined_vec[j] = true; - } - } - - all_mined = all_mined_vec.iter().all(|elem| *elem); - if all_mined { - break; - } - } - if !all_mined { - panic!( - "Failed to mine all transactions: nonces = {account_after_nonces:?}, expected {account_before_nonces:?} +
{batch_size}" - ); - } -} - -#[test] -#[ignore] -fn atlas_stress_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let mut initial_balances = vec![]; - let mut users = vec![]; - - let batches = 5; - let batch_size = 20; - - for _i in 0..(2 * batches * batch_size + 1) { - let user = StacksPrivateKey::random(); - let initial_balance_user = InitialBalance { - address: to_addr(&user).into(), - amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), - }; - users.push(user); - initial_balances.push(initial_balance_user); - } - - // Prepare the config of the bootstrap node - let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); - conf_bootstrap_node - .initial_balances - .append(&mut initial_balances.clone()); - - conf_bootstrap_node.miner.first_attempt_time_ms = u64::MAX; - conf_bootstrap_node.miner.subsequent_attempt_time_ms = u64::MAX; - - conf_bootstrap_node.node.mine_microblocks = true; - conf_bootstrap_node.miner.microblock_attempt_time_ms = 2_000; - conf_bootstrap_node.node.wait_time_for_microblocks = 0; - conf_bootstrap_node.node.microblock_frequency = 0; - conf_bootstrap_node.miner.first_attempt_time_ms = 1_000_000; - conf_bootstrap_node.miner.subsequent_attempt_time_ms = 2_000_000; - conf_bootstrap_node.burnchain.max_rbf = 1000000; - conf_bootstrap_node.node.wait_time_for_blocks = 1_000; - - conf_bootstrap_node.node.always_use_affirmation_maps = false; - - let user_1 = users.pop().unwrap(); - let initial_balance_user_1 = initial_balances.pop().unwrap(); - - // Start the attached observer - test_observer::spawn(); - - let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); - - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_bootstrap_node.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = format!("http://{}", &conf_bootstrap_node.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); - - thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let mut index_block_hashes = vec![]; - - // Let's publish a (1) namespace-preorder, (2) namespace-reveal and (3) name-import in this mempool - - // (define-public (namespace-preorder (hashed-salted-namespace (buff 20)) - // (stx-to-burn uint)) - let namespace = "passport"; - let salt = "some-salt"; - let salted_namespace = format!("{namespace}{salt}"); - let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); - let tx_1 = make_contract_call( - &user_1, - 0, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-preorder", - &[ - Value::buff_from(hashed_namespace.to_bytes().to_vec()).unwrap(), - Value::UInt(1000000000), - ], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_1.clone()) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx_1[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - // (define-public (namespace-reveal (namespace (buff 20)) - // (namespace-salt (buff 20)) - // (p-func-base uint) - // (p-func-coeff uint) - // (p-func-b1 uint) - // (p-func-b2 uint) - // (p-func-b3 uint) - // (p-func-b4 uint) - // (p-func-b5 uint) - // (p-func-b6 uint) - // (p-func-b7 uint) - // (p-func-b8 uint) - // (p-func-b9 uint) - // (p-func-b10 uint) - // (p-func-b11 uint) - // (p-func-b12 uint) - // (p-func-b13 uint) - // (p-func-b14 uint) - // (p-func-b15 uint) - // (p-func-b16 uint) - // (p-func-non-alpha-discount uint) - // (p-func-no-vowel-discount uint) - // (lifetime uint) - // (namespace-import principal)) - let tx_2 = make_contract_call( - &user_1, - 1, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-reveal", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(salt.as_bytes().to_vec()).unwrap(), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1000), - Value::Principal(initial_balance_user_1.address), - ], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_2.clone()) - .send() - .unwrap(); - eprintln!("{:#?}", res); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx_2[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - let mut mined_namespace_reveal = false; - 
for _j in 0..10 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); - - let account_after = get_account(&http_origin, &to_addr(&user_1)); - if account_after.nonce == 2 { - mined_namespace_reveal = true; - break; - } - } - assert!( - mined_namespace_reveal, - "Did not mine namespace preorder or reveal" - ); - - // make a _ton_ of name-imports - for i in 0..batches { - let account_before = get_account(&http_origin, &to_addr(&user_1)); - - for j in 0..batch_size { - // (define-public (name-import (namespace (buff 20)) - // (name (buff 48)) - // (zonefile-hash (buff 20))) - let zonefile_hex = format!("facade00{:04x}{:04x}{:04x}", batch_size * i + j, i, j); - let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - - let tx_3 = make_contract_call( - &user_1, - 2 + (batch_size * i + j) as u64, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "name-import", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(format!("johndoe{}", i * batch_size + j).as_bytes().to_vec()) - .unwrap(), - Value::Principal(to_addr(&users[i * batch_size + j]).into()), - Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), - ], - ); - - let body = { - let content = PostTransactionRequestBody { - tx: bytes_to_hex(&tx_3), - attachment: Some(zonefile_hex.to_string()), - }; - serde_json::to_vec(&json!(content)).unwrap() - }; - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/json") - .body(body) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - } - - // wait for them all to be mined - let mut all_mined = false; - let account_after_nonce = 0; - for _j in 0..10 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); - - let (ch, bhh) = get_chain_tip(&http_origin); - let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - index_block_hashes.push(ibh); - - let account_after = get_account(&http_origin, &to_addr(&user_1)); - let account_after_nonce = account_after.nonce; - if account_before.nonce + (batch_size as u64) <= account_after_nonce { - all_mined = true; - break; - } - } - assert!( - all_mined, - "Failed to mine all transactions: nonce = {account_after_nonce}, expected {}", - account_before.nonce + (batch_size as u64) - ); - } - - // launch namespace - // (define-public (namespace-ready (namespace (buff 20))) - let namespace = "passport"; - let tx_4 = make_contract_call( - &user_1, - 2 + (batch_size as u64) * (batches as u64), - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-ready", - &[Value::buff_from(namespace.as_bytes().to_vec()).unwrap()], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_4) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - let mut mined_namespace_ready = false; - for _j in 0..10 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); - - let (ch, bhh) = get_chain_tip(&http_origin); - let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - 
index_block_hashes.push(ibh); - - let account_after = get_account(&http_origin, &to_addr(&user_1)); - if account_after.nonce == 2 + (batch_size as u64) * (batches as u64) { - mined_namespace_ready = true; - break; - } - } - assert!(mined_namespace_ready, "Did not mine namespace ready"); - - // make a _ton_ of preorders - { - let mut account_before_nonces = vec![0; batches * batch_size]; - for j in 0..batches * batch_size { - let account_before = - get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); - account_before_nonces[j] = account_before.nonce; - - let fqn = format!("janedoe{j}.passport"); - let fqn_bytes = fqn.as_bytes().to_vec(); - let salt = format!("{:04x}", j); - let salt_bytes = salt.as_bytes().to_vec(); - let mut hash_data = fqn_bytes.clone(); - hash_data.append(&mut salt_bytes.clone()); - - let salted_hash = Hash160::from_data(&hash_data); - - let tx_5 = make_contract_call( - &users[batches * batch_size + j], - 0, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "name-preorder", - &[ - Value::buff_from(salted_hash.0.to_vec()).unwrap(), - Value::UInt(500), - ], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_5.clone()) - .send() - .unwrap(); - - eprintln!( - "sent preorder for {}:\n{res:#?}", - &to_addr(&users[batches * batch_size + j]) - ); - if !res.status().is_success() { - panic!(""); - } - } - - wait_for_mined( - &mut btc_regtest_controller, - &blocks_processed, - &http_origin, - &users[batches * batch_size..], - &account_before_nonces, - batch_size, - batches, - &mut index_block_hashes, - ); - } - - // make a _ton_ of registers - { - let mut account_before_nonces = vec![0; batches * batch_size]; - for j in 0..batches * batch_size { - let account_before = - get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); - account_before_nonces[j] = account_before.nonce; - - let name = format!("janedoe{j}"); - let salt = format!("{j:04x}"); - - let zonefile_hex = format!("facade01{j:04x}"); - let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - - let tx_6 = make_contract_call( - &users[batches * batch_size + j], - 1, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "name-register", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(name.as_bytes().to_vec()).unwrap(), - Value::buff_from(salt.as_bytes().to_vec()).unwrap(), - Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), - ], - ); - - let body = { - let content = PostTransactionRequestBody { - tx: bytes_to_hex(&tx_6), - attachment: Some(zonefile_hex.to_string()), - }; - serde_json::to_vec(&json!(content)).unwrap() - }; - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/json") - .body(body) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - } - - wait_for_mined( - &mut btc_regtest_controller, - &blocks_processed, - &http_origin, - &users[batches * batch_size..], - &account_before_nonces, - batch_size, - batches, - &mut index_block_hashes, - ); - } - - // make a _ton_ of updates - { - let mut account_before_nonces = vec![0; batches * batch_size]; - for j in 0..batches * batch_size { 
- let account_before = - get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); - account_before_nonces[j] = account_before.nonce; - - let name = format!("janedoe{j}"); - let zonefile_hex = format!("facade02{j:04x}"); - let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - - let tx_7 = make_contract_call( - &users[batches * batch_size + j], - 2, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "name-update", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(name.as_bytes().to_vec()).unwrap(), - Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), - ], - ); - - let body = { - let content = PostTransactionRequestBody { - tx: bytes_to_hex(&tx_7), - attachment: Some(zonefile_hex.to_string()), - }; - serde_json::to_vec(&json!(content)).unwrap() - }; - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/json") - .body(body) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - } - - wait_for_mined( - &mut btc_regtest_controller, - &blocks_processed, - &http_origin, - &users[batches * batch_size..], - &account_before_nonces, - batch_size, - batches, - &mut index_block_hashes, - ); - } - - // make a _ton_ of renewals - { - let mut account_before_nonces = vec![0; batches * batch_size]; - for j in 0..batches * batch_size { - let account_before = - get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); - account_before_nonces[j] = account_before.nonce; - - let name = format!("janedoe{j}"); - let zonefile_hex = format!("facade03{j:04x}"); - let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - - let tx_8 = make_contract_call( - &users[batches * batch_size + j], - 3, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "name-renewal", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(name.as_bytes().to_vec()).unwrap(), - Value::UInt(500), - Value::none(), - Value::some(Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap()) - .unwrap(), - ], - ); - - let body = { - let content = PostTransactionRequestBody { - tx: bytes_to_hex(&tx_8), - attachment: Some(zonefile_hex.to_string()), - }; - serde_json::to_vec(&json!(content)).unwrap() - }; - - let path = format!("{http_origin}/v2/transactions"); - let res = client - ..post(&path) - .header("Content-Type", "application/json") - .body(body) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - } - - wait_for_mined( - &mut btc_regtest_controller, - &blocks_processed, - &http_origin, - &users[batches * batch_size..], - &account_before_nonces, - batch_size, - batches, - &mut index_block_hashes, - ); - } - - // find all attachment indexes and make sure we can get them - let mut attachment_indexes = HashMap::new(); - let mut attachment_hashes = HashMap::new(); - { - let atlasdb_path = conf_bootstrap_node.get_atlas_db_file_path(); - let atlasdb = AtlasDB::connect(AtlasConfig::new(false), &atlasdb_path, false).unwrap(); - for ibh in index_block_hashes.iter() { - let indexes = query_rows::<u64, _>( - &atlasdb.conn, - "SELECT attachment_index FROM attachment_instances WHERE index_block_hash = ?1", -
&[ibh], - ) - .unwrap(); - if !indexes.is_empty() { - attachment_indexes.insert(*ibh, indexes.clone()); - } - - for index in indexes.iter() { - let mut hashes = query_row_columns::<Hash160, _>( - &atlasdb.conn, - "SELECT content_hash FROM attachment_instances WHERE index_block_hash = ?1 AND attachment_index = ?2", - params![ibh, u64_to_sql(*index).unwrap()], - "content_hash") - .unwrap(); - if !hashes.is_empty() { - assert_eq!(hashes.len(), 1); - attachment_hashes.insert((*ibh, *index), hashes.pop()); - } - } - } - } - eprintln!("attachment_indexes = {attachment_indexes:?}"); - - let max_request_time_ms = 100; - - for (ibh, attachments) in attachment_indexes.iter() { - let l = attachments.len(); - for i in 0..(l / MAX_ATTACHMENT_INV_PAGES_PER_REQUEST + 1) { - if i * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST >= l { - break; - } - - let attachments_batch = attachments[i * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST - ..cmp::min((i + 1) * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, l)] - .to_vec(); - let path = format!( - "{http_origin}/v2/attachments/inv?index_block_hash={ibh}&pages_indexes={}", - attachments_batch - .iter() - .map(|a| format!("{a}")) - .collect::<Vec<String>>() - .join(",") - ); - - let attempts = 10; - let ts_begin = get_epoch_time_ms(); - for _ in 0..attempts { - let res = client.get(&path).send().unwrap(); - assert!( - res.status().is_success(), - "Bad response for `{path}`: `{:?}`", - res.text().unwrap() - ); - let attachment_inv_response: GetAttachmentsInvResponse = res.json().unwrap(); - eprintln!("attachment inv response for {path}: {attachment_inv_response:?}"); - } - let ts_end = get_epoch_time_ms(); - let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {path} {attempts} times in {total_time}ms"); - - // requests should take no more than max_request_time_ms - assert!( - total_time < attempts * max_request_time_ms, - "Atlas inventory request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" - ); - } - - for attachment in attachments.iter().take(l) { - if *attachment == 0 { - continue; - } - let content_hash = attachment_hashes - .get(&(*ibh, *attachment)) - .cloned() + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_1.clone()) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx_1[..]) .unwrap() - .unwrap(); - - let path = format!("{http_origin}/v2/attachments/{content_hash}"); - - let attempts = 10; - let ts_begin = get_epoch_time_ms(); - for _ in 0..attempts { - let res = client.get(&path).send().unwrap(); - assert!( - res.status().is_success(), - "Bad response for `{path}`: `{:?}`", - res.text().unwrap() - ); - let attachment_response: GetAttachmentResponse = res.json().unwrap(); - eprintln!("attachment response for {path}: {attachment_response:?}"); - } - let ts_end = get_epoch_time_ms(); - let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {path} {attempts} times in {total_time}ms"); - - // requests should take no more than max_request_time_ms - assert!( - total_time < attempts * max_request_time_ms, - "Atlas chunk request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" - ); - } + .txid() + .to_string() + ); + } else { + eprintln!("{}", res.text().unwrap()); + panic!(""); } - test_observer::clear(); -} + // (define-public (namespace-reveal (namespace (buff 20)) + // (namespace-salt (buff
20)) + // (p-func-base uint) + // (p-func-coeff uint) + // (p-func-b1 uint) + // (p-func-b2 uint) + // (p-func-b3 uint) + // (p-func-b4 uint) + // (p-func-b5 uint) + // (p-func-b6 uint) + // (p-func-b7 uint) + // (p-func-b8 uint) + // (p-func-b9 uint) + // (p-func-b10 uint) + // (p-func-b11 uint) + // (p-func-b12 uint) + // (p-func-b13 uint) + // (p-func-b14 uint) + // (p-func-b15 uint) + // (p-func-b16 uint) + // (p-func-non-alpha-discount uint) + // (p-func-no-vowel-discount uint) + // (lifetime uint) + // (namespace-import principal)) + let tx_2 = make_contract_call( + &user_1, + 1, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "namespace-reveal", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(salt.as_bytes().to_vec()).unwrap(), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1000), + Value::Principal(initial_balance_user_1.address), + ], + ); -/// Run a fixed contract 20 times. Linearly increase the amount paid each time. The cost of the -/// contract should stay the same, and the fee rate paid should monotonically grow. The value -/// should grow faster for lower values of `window_size`, because a bigger window slows down the -/// growth. -fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value: f64) { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_2.clone()) + .send() + .unwrap(); + eprintln!("{:#?}", res); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx_2[..]) + .unwrap() + .txid() + .to_string() + ); + } else { + eprintln!("{}", res.text().unwrap()); + panic!(""); } - let max_contract_src = r#" -;; define counter variable -(define-data-var counter int 0) - -;; increment method -(define-public (increment) - (begin - (var-set counter (+ (var-get counter) 1)) - (ok (var-get counter)))) - - (define-public (increment-many) - (begin - (unwrap! (increment) (err u1)) - (unwrap! (increment) (err u1)) - (unwrap! (increment) (err u1)) - (unwrap! (increment) (err u1)) - (ok (var-get counter)))) - "#; - - let spender_sk = StacksPrivateKey::random(); - let spender_addr = to_addr(&spender_sk); - - let (mut conf, _) = neon_integration_test_conf(); - - // Set this estimator as special. - conf.estimation.fee_estimator = Some(FeeEstimatorName::FuzzedWeightedMedianFeeRate); - // Use randomness of 0 to keep test constant. Randomness is tested in unit tests. 
- conf.estimation.fee_rate_fuzzer_fraction = 0f64; - conf.estimation.fee_rate_window_size = window_size; - - conf.initial_balances.push(InitialBalance { - address: spender_addr.into(), - amount: 10000000000, - }); - test_observer::spawn(); - test_observer::register_any(&mut conf); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(200); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - wait_for_runloop(&blocks_processed); - run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 210, &conf); + let mut mined_namespace_reveal = false; + for _j in 0..10 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(10_000); - submit_tx( - &http_origin, - &make_contract_publish( - &spender_sk, - 0, - 110000, - conf.burnchain.chain_id, - "increment-contract", - max_contract_src, - ), + let account_after = get_account(&http_origin, &to_addr(&user_1)); + if account_after.nonce == 2 { + mined_namespace_reveal = true; + break; + } + } + assert!( + mined_namespace_reveal, + "Did not mine namespace preorder or reveal" ); - run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 212, &conf); - - // Loop 20 times. Each time, execute the same transaction, but increase the amount *paid*. - // This will exercise the window size. - let mut response_estimated_costs = vec![]; - let mut response_top_fee_rates = vec![]; - for i in 1..21 { - submit_tx( - &http_origin, - &make_contract_call( - &spender_sk, - i, // nonce - i * 100000, // payment - conf.burnchain.chain_id, - &spender_addr, - "increment-contract", - "increment-many", - &[], - ), - ); - run_until_burnchain_height( - &mut btc_regtest_controller, - &blocks_processed, - 212 + 2 * i, - &conf, - ); - - { - // Read from the fee estimation endpoint.
- let path = format!("{http_origin}/v2/fees/transaction"); - let tx_payload = TransactionPayload::ContractCall(TransactionContractCall { - address: spender_addr, - contract_name: ContractName::from("increment-contract"), - function_name: ClarityName::from("increment-many"), - function_args: vec![], - }); + // make a _ton_ of name-imports + for i in 0..batches { + let account_before = get_account(&http_origin, &to_addr(&user_1)); - let payload_data = tx_payload.serialize_to_vec(); - let payload_hex = format!("0x{}", to_hex(&payload_data)); + for j in 0..batch_size { + // (define-public (name-import (namespace (buff 20)) + // (name (buff 48)) + // (zonefile-hash (buff 20))) + let zonefile_hex = format!("facade00{:04x}{:04x}{:04x}", batch_size * i + j, i, j); + let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - let body = json!({ "transaction_payload": payload_hex.clone() }); + let tx_3 = make_contract_call( + &user_1, + 2 + (batch_size * i + j) as u64, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-import", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(format!("johndoe{}", i * batch_size + j).as_bytes().to_vec()) + .unwrap(), + Value::Principal(to_addr(&users[i * batch_size + j]).into()), + Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), + ], + ); - let client = reqwest::blocking::Client::new(); - let fee_rate_result = client + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx_3), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; + + let path = format!("{http_origin}/v2/transactions"); + let res = client .post(&path) - .json(&body) + .header("Content-Type", "application/json") + .body(body) .send() - .expect("Should be able to post") - .json::<RPCFeeEstimateResponse>() - .expect("Failed to parse result into JSON"); + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + } - response_estimated_costs.push(fee_rate_result.estimated_cost_scalar); - response_top_fee_rates.push(fee_rate_result.estimations.last().unwrap().fee_rate); + // wait for them all to be mined + let mut all_mined = false; + let account_after_nonce = 0; + for _j in 0..10 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(10_000); + + let (ch, bhh) = get_chain_tip(&http_origin); + let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh); + index_block_hashes.push(ibh); + + let account_after = get_account(&http_origin, &to_addr(&user_1)); + let account_after_nonce = account_after.nonce; + if account_before.nonce + (batch_size as u64) <= account_after_nonce { + all_mined = true; + break; + } } + assert!( + all_mined, + "Failed to mine all transactions: nonce = {account_after_nonce}, expected {}", + account_before.nonce + (batch_size as u64) + ); } - // Wait two extra blocks to be sure.
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // launch namespace + // (define-public (namespace-ready (namespace (buff 20))) + let namespace = "passport"; + let tx_4 = make_contract_call( + &user_1, + 2 + (batch_size as u64) * (batches as u64), + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "namespace-ready", + &[Value::buff_from(namespace.as_bytes().to_vec()).unwrap()], + ); - assert_eq!(response_estimated_costs.len(), response_top_fee_rates.len()); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_4) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } - // Check that: - // 1) The cost is always the same. - // 2) Fee rate grows monotonically. - for i in 1..response_estimated_costs.len() { - let curr_cost = response_estimated_costs[i]; - let last_cost = response_estimated_costs[i - 1]; - assert_eq!(curr_cost, last_cost); + let mut mined_namespace_ready = false; + for _j in 0..10 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(10_000); - let curr_rate = response_top_fee_rates[i]; - let last_rate = response_top_fee_rates[i - 1]; - assert!(curr_rate >= last_rate); + let (ch, bhh) = get_chain_tip(&http_origin); + let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh); + index_block_hashes.push(ibh); + + let account_after = get_account(&http_origin, &to_addr(&user_1)); + if account_after.nonce == 2 + (batch_size as u64) * (batches as u64) { + mined_namespace_ready = true; + break; + } } + assert!(mined_namespace_ready, "Did not mine namespace ready"); - // Check the final value is near input parameter. - assert!(is_close_f64( - *response_top_fee_rates.last().unwrap(), - expected_final_value - )); + // make a _ton_ of preorders + { + let mut account_before_nonces = vec![0; batches * batch_size]; + for j in 0..batches * batch_size { + let account_before = + get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); + account_before_nonces[j] = account_before.nonce; - channel.stop_chains_coordinator(); -} + let fqn = format!("janedoe{j}.passport"); + let fqn_bytes = fqn.as_bytes().to_vec(); + let salt = format!("{:04x}", j); + let salt_bytes = salt.as_bytes().to_vec(); + let mut hash_data = fqn_bytes.clone(); + hash_data.append(&mut salt_bytes.clone()); -/// Test the FuzzedWeightedMedianFeeRate with window size 5 and randomness 0. We increase the -/// amount paid linearly each time. This estimate should grow *faster* than with window size 10. -#[test] -#[ignore] -fn fuzzed_median_fee_rate_estimation_test_window5() { - fuzzed_median_fee_rate_estimation_test(5, 202680.0992) -} + let salted_hash = Hash160::from_data(&hash_data); -/// Test the FuzzedWeightedMedianFeeRate with window size 10 and randomness 0. We increase the -/// amount paid linearly each time. This estimate should grow *slower* than with window size 5. 
-#[test] -#[ignore] -fn fuzzed_median_fee_rate_estimation_test_window10() { - fuzzed_median_fee_rate_estimation_test(10, 90080.5496) -} + let tx_5 = make_contract_call( + &users[batches * batch_size + j], + 0, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-preorder", + &[ + Value::buff_from(salted_hash.0.to_vec()).unwrap(), + Value::UInt(500), + ], + ); -#[test] -#[ignore] -fn use_latest_tip_integration_test() { - // The purpose of this test is to check if setting the query parameter `tip` to `latest` is working - // as expected. Multiple endpoints accept this parameter, and in this test, we are using the - // GetContractSrc method to test it. - // - // The following scenarios are tested here: - // - The caller does not specify the tip parameter, and the canonical chain tip is used regardless of the - // state of the unconfirmed microblock stream. - // - The caller passes tip=latest with an existing unconfirmed microblock stream, and - // Clarity state from the unconfirmed microblock stream is successfully loaded. - // - The caller passes tip=latest with an empty unconfirmed microblock stream, and - // Clarity state from the canonical chain tip is successfully loaded (i.e. you don't - // get a 404 even though the unconfirmed chain tip points to a nonexistent MARF trie). - // - // Note: In this test, we are manually creating a microblock as well as reloading the unconfirmed - // state of the chainstate, instead of relying on `next_block_and_wait` to generate - // microblocks. We do this because the unconfirmed state is not automatically being initialized - // on the node, so attempting to validate any transactions against the expected unconfirmed - // state fails.
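The `get_contract_src` helper exercised below is defined elsewhere in this module; a hypothetical sketch of the request it makes, assuming the node's `/v2/contracts/source` endpoint and a boolean that appends `tip=latest` (the helper name, parameters, and return type here are illustrative):

```rust
// Hypothetical helper in the spirit of get_contract_src: fetch contract
// source, optionally pinned to the unconfirmed tip via ?tip=latest.
fn fetch_contract_src(
    http_origin: &str,
    addr: &str,
    contract_name: &str,
    use_latest_tip: bool,
) -> Result<String, String> {
    let tip = if use_latest_tip { "&tip=latest" } else { "" };
    let path =
        format!("{http_origin}/v2/contracts/source/{addr}/{contract_name}?proof=0{tip}");
    let res = reqwest::blocking::get(&path).map_err(|e| e.to_string())?;
    if res.status().is_success() {
        res.text().map_err(|e| e.to_string())
    } else {
        Err(res.text().unwrap_or_default())
    }
}
```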
- if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_5.clone()) + .send() + .unwrap(); + + eprintln!( + "sent preorder for {}:\n{res:#?}", + &to_addr(&users[batches * batch_size + j]) + ); + if !res.status().is_success() { + panic!(""); + } + } + + wait_for_mined( + &mut btc_regtest_controller, + &blocks_processed, + &http_origin, + &users[batches * batch_size..], + &account_before_nonces, + batch_size, + batches, + &mut index_block_hashes, + ); } - let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let spender_stacks_addr = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stacks_addr.into(); + // make a _ton_ of registers + { + let mut account_before_nonces = vec![0; batches * batch_size]; + for j in 0..batches * batch_size { + let account_before = + get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); + account_before_nonces[j] = account_before.nonce; - let (mut conf, _) = neon_integration_test_conf(); + let name = format!("janedoe{j}"); + let salt = format!("{j:04x}"); - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 100300, - }); + let zonefile_hex = format!("facade01{j:04x}"); + let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 10_000; - conf.node.microblock_frequency = 1_000; + let tx_6 = make_contract_call( + &users[batches * batch_size + j], + 1, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-register", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(name.as_bytes().to_vec()).unwrap(), + Value::buff_from(salt.as_bytes().to_vec()).unwrap(), + Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), + ], + ); - test_observer::spawn(); - test_observer::register_any(&mut conf); + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx_6), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/json") + .body(body) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + } - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + wait_for_mined( + &mut btc_regtest_controller, + &blocks_processed, + &http_origin, + &users[batches * batch_size..], + &account_before_nonces, + batch_size, + batches, + &mut index_block_hashes, + ); + } - btc_regtest_controller.bootstrap_chain(201); + // make a _ton_ of updates + { + let mut account_before_nonces = vec![0; batches * batch_size]; + for j in 0..batches * batch_size { + let account_before = + get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); + account_before_nonces[j] = account_before.nonce; - eprintln!("Chain bootstrapped..."); + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade02{j:04x}"); + let 
hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); + let tx_7 = make_contract_call( + &users[batches * batch_size + j], + 2, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-update", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(name.as_bytes().to_vec()).unwrap(), + Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), + ], + ); - thread::spawn(move || run_loop.start(None, 0)); + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx_7), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; - // Give the run loop some time to start up! - wait_for_runloop(&blocks_processed); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/json") + .body(body) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + } - // First block wakes up the run loop. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + wait_for_mined( + &mut btc_regtest_controller, + &blocks_processed, + &http_origin, + &users[batches * batch_size..], + &account_before_nonces, + batch_size, + batches, + &mut index_block_hashes, + ); + } - // Second block will hold our VRF registration. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // make a _ton_ of renewals + { + let mut account_before_nonces = vec![0; batches * batch_size]; + for j in 0..batches * batch_size { + let account_before = + get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); + account_before_nonces[j] = account_before.nonce; - // Third block will be the first mined Stacks block. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade03{j:04x}"); + let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - // Let's query our first spender. - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.balance, 100300); - assert_eq!(account.nonce, 0); + let tx_8 = make_contract_call( + &users[batches * batch_size + j], + 3, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-renewal", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(name.as_bytes().to_vec()).unwrap(), + Value::UInt(500), + Value::none(), + Value::some(Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap()) + .unwrap(), + ], + ); - // this call wakes up our node - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx_8), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; - // Open chainstate. 
- // TODO (hack) instantiate the sortdb in the burnchain - let _ = btc_regtest_controller.sortdb_mut(); - let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf); - let tip_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); - let (mut chainstate, _) = StacksChainState::open( - false, - CHAIN_ID_TESTNET, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/json") + .body(body) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + } - // Initialize the unconfirmed state. - chainstate - .reload_unconfirmed_state( - &btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(), - tip_hash, - ) - .unwrap(); + wait_for_mined( + &mut btc_regtest_controller, + &blocks_processed, + &http_origin, + &users[batches * batch_size..], + &account_before_nonces, + batch_size, + batches, + &mut index_block_hashes, + ); + } - // Make microblock with two transactions. - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let transfer_tx = make_stacks_transfer_mblock_only( - &spender_sk, - 0, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ); + // find all attachment indexes and make sure we can get them + let mut attachment_indexes = HashMap::new(); + let mut attachment_hashes = HashMap::new(); + { + let atlasdb_path = conf_bootstrap_node.get_atlas_db_file_path(); + let atlasdb = AtlasDB::connect(AtlasConfig::new(false), &atlasdb_path, false).unwrap(); + for ibh in index_block_hashes.iter() { + let indexes = query_rows::( + &atlasdb.conn, + "SELECT attachment_index FROM attachment_instances WHERE index_block_hash = ?1", + &[ibh], + ) + .unwrap(); + if !indexes.is_empty() { + attachment_indexes.insert(*ibh, indexes.clone()); + } - let caller_src = " - (define-public (execute) - (ok stx-liquid-supply)) - "; - let publish_tx = make_contract_publish_microblock_only( - &spender_sk, - 1, - 1000, - conf.burnchain.chain_id, - "caller", - caller_src, - ); + for index in indexes.iter() { + let mut hashes = query_row_columns::( + &atlasdb.conn, + "SELECT content_hash FROM attachment_instances WHERE index_block_hash = ?1 AND attachment_index = ?2", + params![ibh, u64_to_sql(*index).unwrap()], + "content_hash") + .unwrap(); + if !hashes.is_empty() { + assert_eq!(hashes.len(), 1); + attachment_hashes.insert((*ibh, *index), hashes.pop()); + } + } + } + } + eprintln!("attachment_indexes = {attachment_indexes:?}"); - let tx_1 = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); - let tx_2 = StacksTransaction::consensus_deserialize(&mut &publish_tx[..]).unwrap(); - let vec_tx = vec![tx_1, tx_2]; - let privk = - find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024).unwrap(); - let iconn = btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(); - let mblock = make_microblock( - &privk, - &mut chainstate, - &iconn, - consensus_hash, - stacks_block, - vec_tx, - ); - let mut mblock_bytes = vec![]; - mblock.consensus_serialize(&mut mblock_bytes).unwrap(); + let max_request_time_ms = 100; - let client = reqwest::blocking::Client::new(); + for (ibh, attachments) in attachment_indexes.iter() { + let l = attachments.len(); + for i in 0..(l / MAX_ATTACHMENT_INV_PAGES_PER_REQUEST + 1) { + if i * 
MAX_ATTACHMENT_INV_PAGES_PER_REQUEST >= l {
+ break;
+ }

- // Post the microblock
- let path = format!("{http_origin}/v2/microblocks");
- let res: String = client
- .post(&path)
- .header("Content-Type", "application/octet-stream")
- .body(mblock_bytes.clone())
- .send()
- .unwrap()
- .json()
- .unwrap();
+ let attachments_batch = attachments[i * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST
+ ..cmp::min((i + 1) * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, l)]
+ .to_vec();
+ let path = format!(
+ "{http_origin}/v2/attachments/inv?index_block_hash={ibh}&pages_indexes={}",
+ attachments_batch
+ .iter()
+ .map(|a| format!("{a}"))
+ .collect::<Vec<String>>()
+ .join(",")
+ );

- assert_eq!(res, format!("{}", &mblock.block_hash()));
+ let attempts = 10;
+ let ts_begin = get_epoch_time_ms();
+ for _ in 0..attempts {
+ let res = client.get(&path).send().unwrap();
+ assert!(
+ res.status().is_success(),
+ "Bad response for `{path}`: `{:?}`",
+ res.text().unwrap()
+ );
+ let attachment_inv_response: GetAttachmentsInvResponse = res.json().unwrap();
+ eprintln!("attachment inv response for {path}: {attachment_inv_response:?}");
+ }
+ let ts_end = get_epoch_time_ms();
+ let total_time = ts_end.saturating_sub(ts_begin);
+ eprintln!("Requested {path} {attempts} times in {total_time}ms");

- // Wait for the microblock to be accepted
- sleep_ms(5_000);
- let path = format!("{http_origin}/v2/info");
- let mut iter_count = 0;
- loop {
- let tip_info = client
- .get(&path)
- .send()
- .unwrap()
- .json::<RPCPeerInfoData>()
- .unwrap();
- eprintln!("{:#?}", tip_info);
- if tip_info.unanchored_tip == Some(StacksBlockId([0; 32])) {
- iter_count += 1;
+ // requests should take no more than max_request_time_ms
 assert!(
- iter_count < 10,
- "Hit retry count while waiting for net module to process pushed microblock"
+ total_time < attempts * max_request_time_ms,
+ "Atlas inventory request is too slow: {total_time} >= {attempts} * {max_request_time_ms}"
 );
- sleep_ms(5_000);
- continue;
- } else {
- break;
 }
- }

- // Wait at least two p2p refreshes so it can produce the microblock.
- for i in 0..30 {
- info!(
- "wait {} more seconds for microblock miner to find our transaction...",
- 30 - i
- );
- sleep_ms(1000);
- }
+ for attachment in attachments.iter().take(l) {
+ if *attachment == 0 {
+ continue;
+ }
+ let content_hash = attachment_hashes
+ .get(&(*ibh, *attachment))
+ .cloned()
+ .unwrap()
+ .unwrap();

- // Check event observer for new microblock event (expect 1).
- let microblock_events = test_observer::get_microblocks();
- assert_eq!(microblock_events.len(), 1);
+ let path = format!("{http_origin}/v2/attachments/{content_hash}");

- // Don't set the tip parameter, and ask for the source of the contract we just defined in a microblock.
- // This should fail because the anchored tip would be unaware of this contract.
- let err_opt = get_contract_src(
- &http_origin,
- spender_stacks_addr,
- "caller".to_string(),
- false,
- );
- match err_opt {
- Ok(_) => {
- panic!(
- "Asking for the contract source off the anchored tip for a contract published \
- only in unconfirmed state should error." 
+ let attempts = 10; + let ts_begin = get_epoch_time_ms(); + for _ in 0..attempts { + let res = client.get(&path).send().unwrap(); + assert!( + res.status().is_success(), + "Bad response for `{path}`: `{:?}`", + res.text().unwrap() + ); + let attachment_response: GetAttachmentResponse = res.json().unwrap(); + eprintln!("attachment response for {path}: {attachment_response:?}"); + } + let ts_end = get_epoch_time_ms(); + let total_time = ts_end.saturating_sub(ts_begin); + eprintln!("Requested {path} {attempts} times in {total_time}ms"); + + // requests should take no more than max_request_time_ms + assert!( + total_time < attempts * max_request_time_ms, + "Atlas chunk request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" ); } - // Expect to get "NoSuchContract" because the function we are attempting to call is in a - // contract that only exists on unconfirmed state (and we did not set tip). - Err(err_str) => { - assert!(err_str.contains("No contract source data found")); - } } - // Set tip=latest, and ask for the source of the contract defined in the microblock. - // This should succeeed. - assert!(get_contract_src( - &http_origin, - spender_stacks_addr, - "caller".to_string(), - true, - ) - .is_ok()); - - // Mine an anchored block because now we want to have no unconfirmed state. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Check that the underlying trie for the unconfirmed state does not exist. - assert!(chainstate.unconfirmed_state.is_some()); - let unconfirmed_state = chainstate.unconfirmed_state.as_mut().unwrap(); - let trie_exists = match unconfirmed_state - .clarity_inst - .trie_exists_for_block(&unconfirmed_state.unconfirmed_chain_tip) - { - Ok(res) => res, - Err(e) => { - panic!("error when determining whether or not trie exists: {:?}", e); - } - }; - assert!(!trie_exists); - - // Set tip=latest, and ask for the source of the contract defined in the previous epoch. - // The underlying MARF trie for the unconfirmed tip does not exist, so the transaction will be - // validated against the confirmed chain tip instead of the unconfirmed tip. This should be valid. - assert!(get_contract_src( - &http_origin, - spender_stacks_addr, - "caller".to_string(), - true, - ) - .is_ok()); + test_observer::clear(); } -#[test] -#[ignore] -fn test_flash_block_skip_tenure() { +/// Run a fixed contract 20 times. Linearly increase the amount paid each time. The cost of the +/// contract should stay the same, and the fee rate paid should monotonically grow. The value +/// should grow faster for lower values of `window_size`, because a bigger window slows down the +/// growth. 
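// [Editorial sketch] The doc comment above describes a windowed median. Below is a
// minimal, self-contained illustration (a hypothetical helper, not the estimator's
// actual implementation) of why a larger `window_size` damps the growth of the
// reported fee rate when the paid amounts rise linearly.
fn windowed_median(mut window: Vec<f64>) -> f64 {
    // Sort the window and take the upper-middle element; enough for a sketch.
    window.sort_by(|a, b| a.partial_cmp(b).unwrap());
    window[window.len() / 2]
}

fn median_growth_demo() {
    // Linearly increasing fee payments, as in the test below.
    let rates: Vec<f64> = (1..=20).map(|i| (i * 100_000) as f64).collect();
    let last = |n: usize| rates[rates.len() - n..].to_vec();
    // The 5-sample window's median is 1.8M while the 10-sample window's is 1.6M:
    // the shorter window tracks rising rates faster, hence the larger final value.
    assert!(windowed_median(last(5)) > windowed_median(last(10)));
}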
+fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value: f64) { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let (mut conf, miner_account) = neon_integration_test_conf(); - conf.miner.microblock_attempt_time_ms = 5_000; - conf.node.wait_time_for_microblocks = 0; - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let missed_tenures = run_loop.get_missed_tenures_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // fault injection: force tenures to take too long - std::env::set_var("STX_TEST_SLOW_TENURE", "11000"); + let max_contract_src = r#" +;; define counter variable +(define-data-var counter int 0) - for i in 0..10 { - // build one bitcoin block every 10 seconds - eprintln!("Build bitcoin block +{i}"); - btc_regtest_controller.build_next_block(1); - sleep_ms(10000); - } +;; increment method +(define-public (increment) + (begin + (var-set counter (+ (var-get counter) 1)) + (ok (var-get counter)))) - // at least one tenure was skipped - let num_skipped = missed_tenures.load(Ordering::SeqCst); - eprintln!("Skipped {num_skipped} tenures"); - assert!(num_skipped > 1); + (define-public (increment-many) + (begin + (unwrap! (increment) (err u1)) + (unwrap! (increment) (err u1)) + (unwrap! (increment) (err u1)) + (unwrap! (increment) (err u1)) + (ok (var-get counter)))) + "#; - // let's query the miner's account nonce: + let spender_sk = StacksPrivateKey::random(); + let spender_addr = to_addr(&spender_sk); - eprintln!("Miner account: {miner_account}"); + let (mut conf, _) = neon_integration_test_conf(); - let account = get_account(&http_origin, &miner_account); - eprintln!("account = {account:?}"); - assert_eq!(account.balance, 0); - assert_eq!(account.nonce, 2); + // Set this estimator as special. + conf.estimation.fee_estimator = Some(FeeEstimatorName::FuzzedWeightedMedianFeeRate); + // Use randomness of 0 to keep test constant. Randomness is tested in unit tests. 
+ conf.estimation.fee_rate_fuzzer_fraction = 0f64;
+ conf.estimation.fee_rate_window_size = window_size;

- channel.stop_chains_coordinator();
-}
+ conf.initial_balances.push(InitialBalance {
+ address: spender_addr.into(),
+ amount: 10000000000,
+ });
+ test_observer::spawn();
+ test_observer::register_any(&mut conf);

-#[test]
-#[ignore]
-fn test_chainwork_first_intervals() {
- let (conf, _) = neon_integration_test_conf();
 let mut btcd_controller = BitcoinCoreController::new(conf.clone());
 btcd_controller
 .start_bitcoind()
 .expect("Failed starting bitcoind");

 let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+ let http_origin = format!("http://{}", &conf.node.rpc_bind);

- btc_regtest_controller.bootstrap_chain(2016 * 2 - 1);
+ btc_regtest_controller.bootstrap_chain(200);

 eprintln!("Chain bootstrapped...");

- let mut run_loop = neon::RunLoop::new(conf);
+ let mut run_loop = neon::RunLoop::new(conf.clone());
 let blocks_processed = run_loop.get_blocks_processed_arc();

 let channel = run_loop.get_coordinator_channel().unwrap();

 thread::spawn(move || run_loop.start(None, 0));

- // give the run loop some time to start up!
 wait_for_runloop(&blocks_processed);

- channel.stop_chains_coordinator();
-}
-
-#[test]
-#[ignore]
-fn test_chainwork_partial_interval() {
- let (conf, _) = neon_integration_test_conf();
- let mut btcd_controller = BitcoinCoreController::new(conf.clone());
- btcd_controller
- .start_bitcoind()
- .expect("Failed starting bitcoind");
+ run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 210, &conf);

- let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+ submit_tx(
+ &http_origin,
+ &make_contract_publish(
+ &spender_sk,
+ 0,
+ 110000,
+ conf.burnchain.chain_id,
+ "increment-contract",
+ max_contract_src,
+ ),
+ );
+ run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 212, &conf);

- btc_regtest_controller.bootstrap_chain(2016 - 1);
+ // Loop 20 times. Each time, execute the same transaction, but increase the amount *paid*.
+ // This will exercise the window size.
+ let mut response_estimated_costs = vec![];
+ let mut response_top_fee_rates = vec![];
+ for i in 1..21 {
+ submit_tx(
+ &http_origin,
+ &make_contract_call(
+ &spender_sk,
+ i, // nonce
+ i * 100000, // payment
+ conf.burnchain.chain_id,
+ &spender_addr,
+ "increment-contract",
+ "increment-many",
+ &[],
+ ),
+ );
+ run_until_burnchain_height(
+ &mut btc_regtest_controller,
+ &blocks_processed,
+ 212 + 2 * i,
+ &conf,
+ );

- eprintln!("Chain bootstrapped...");
+ {
+ // Read from the fee estimation endpoint.
+ let path = format!("{http_origin}/v2/fees/transaction");

- let mut run_loop = neon::RunLoop::new(conf);
- let blocks_processed = run_loop.get_blocks_processed_arc();
+ let tx_payload = TransactionPayload::ContractCall(TransactionContractCall {
+ address: spender_addr,
+ contract_name: ContractName::from("increment-contract"),
+ function_name: ClarityName::from("increment-many"),
+ function_args: vec![],
+ });

- let channel = run_loop.get_coordinator_channel().unwrap();
+ let payload_data = tx_payload.serialize_to_vec();
+ let payload_hex = format!("0x{}", to_hex(&payload_data));

- thread::spawn(move || run_loop.start(None, 0));
+ let body = json!({ "transaction_payload": payload_hex.clone() });

- // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed);
- channel.stop_chains_coordinator();
-}
+ let client = reqwest::blocking::Client::new();
+ let fee_rate_result = client
+ .post(&path)
+ .json(&body)
+ .send()
+ .expect("Should be able to post")
+ .json::<RPCFeeEstimateResponse>()
+ .expect("Failed to parse result into JSON");

-#[test]
-#[ignore]
-fn test_problematic_txs_are_not_stored() {
- if env::var("BITCOIND_TEST") != Ok("1".into()) {
- return;
+ response_estimated_costs.push(fee_rate_result.estimated_cost_scalar);
+ response_top_fee_rates.push(fee_rate_result.estimations.last().unwrap().fee_rate);
+ }
 }

- let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap();
- let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap();
- let spender_sk_3 = StacksPrivateKey::from_hex(SK_3).unwrap();
- let spender_stacks_addr_1 = to_addr(&spender_sk_1);
- let spender_stacks_addr_2 = to_addr(&spender_sk_2);
- let spender_stacks_addr_3 = to_addr(&spender_sk_3);
- let spender_addr_1: PrincipalData = spender_stacks_addr_1.into();
- let spender_addr_2: PrincipalData = spender_stacks_addr_2.into();
- let spender_addr_3: PrincipalData = spender_stacks_addr_3.into();
-
- let (mut conf, _) = neon_integration_test_conf();
+ // Wait two extra blocks to be sure.
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

- conf.initial_balances.push(InitialBalance {
- address: spender_addr_1,
- amount: 1_000_000_000_000,
- });
- conf.initial_balances.push(InitialBalance {
- address: spender_addr_2,
- amount: 1_000_000_000_000,
- });
- conf.initial_balances.push(InitialBalance {
- address: spender_addr_3,
- amount: 1_000_000_000_000,
- });
+ assert_eq!(response_estimated_costs.len(), response_top_fee_rates.len());

- // force mainnet limits in 2.05 for this test
- conf.burnchain.epochs = Some(EpochList::new(&[
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch20,
- start_height: 0,
- end_height: 1,
- block_limit: BLOCK_LIMIT_MAINNET_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_0,
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch2_05,
- start_height: 1,
- end_height: 10_002,
- block_limit: BLOCK_LIMIT_MAINNET_205.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_05,
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch21,
- start_height: 10_002,
- end_height: 9223372036854775807,
- block_limit: BLOCK_LIMIT_MAINNET_21.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_1,
- },
- ]));
- conf.burnchain.pox_2_activation = Some(10_003);
+ // Check that:
+ // 1) The cost is always the same.
+ // 2) Fee rate grows monotonically.
+ for i in 1..response_estimated_costs.len() {
+ let curr_cost = response_estimated_costs[i];
+ let last_cost = response_estimated_costs[i - 1];
+ assert_eq!(curr_cost, last_cost);

- // take effect immediately
- conf.burnchain.ast_precheck_size_height = Some(0);
+ let curr_rate = response_top_fee_rates[i];
+ let last_rate = response_top_fee_rates[i - 1];
+ assert!(curr_rate >= last_rate);
+ }

- test_observer::spawn();
- test_observer::register_any(&mut conf);
+ // Check the final value is near input parameter.
+ assert!(is_close_f64(
+ *response_top_fee_rates.last().unwrap(),
+ expected_final_value
+ ));

- let mut btcd_controller = BitcoinCoreController::new(conf.clone());
+ channel.stop_chains_coordinator();
+}

- btcd_controller
- .start_bitcoind()
- .expect("Failed starting bitcoind");
+/// Test the FuzzedWeightedMedianFeeRate with window size 5 and randomness 0. We increase the
+/// amount paid linearly each time. 
This estimate should grow *faster* than with window size 10.
+#[test]
+#[ignore]
+fn fuzzed_median_fee_rate_estimation_test_window5() {
+ fuzzed_median_fee_rate_estimation_test(5, 202680.0992)
+}

- let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
- let http_origin = format!("http://{}", &conf.node.rpc_bind);
+/// Test the FuzzedWeightedMedianFeeRate with window size 10 and randomness 0. We increase the
+/// amount paid linearly each time. This estimate should grow *slower* than with window size 5.
+#[test]
+#[ignore]
+fn fuzzed_median_fee_rate_estimation_test_window10() {
+ fuzzed_median_fee_rate_estimation_test(10, 90080.5496)
+}

- // something at the limit of the expression depth (will get mined and processed)
- let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1;
- let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize);
- let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize);
- let tx_edge_body = format!("{tx_edge_body_start}u1 {tx_edge_body_end}");
+#[test]
+#[ignore]
+fn use_latest_tip_integration_test() {
+ // The purpose of this test is to check if setting the query parameter `tip` to `latest` is working
+ // as expected. Multiple endpoints accept this parameter, and in this test, we are using the
+ // GetContractSrc method to test it.
+ //
+ // The following scenarios are tested here:
+ // - The caller does not specify the tip parameter, and the canonical chain tip is used regardless of the
+ // state of the unconfirmed microblock stream.
+ // - The caller passes tip=latest with an existing unconfirmed microblock stream, and
+ // Clarity state from the unconfirmed microblock stream is successfully loaded.
+ // - The caller passes tip=latest with an empty unconfirmed microblock stream, and
+ // Clarity state from the canonical chain tip is successfully loaded (i.e. you don't
+ // get a 404 even though the unconfirmed chain tip points to a nonexistent MARF trie).
+ //
+ // Note: In this test, we are manually creating a microblock as well as reloading the unconfirmed
+ // state of the chainstate, instead of relying on `next_block_and_wait` to generate
+ // microblocks. We do this because the unconfirmed state is not automatically being initialized
+ // on the node, so attempting to validate any transactions against the expected unconfirmed
+ // state fails. 
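// [Editorial sketch] For readers unfamiliar with the `tip` query parameter exercised
// below: a minimal illustration of the two request shapes this test compares, assuming
// the node's GET /v2/contracts/source/{principal}/{contract} endpoint (the parameter
// names and values here are placeholders, not test constants).
fn contract_src_url(http_origin: &str, principal: &str, contract: &str, latest: bool) -> String {
    if latest {
        // tip=latest resolves against the unconfirmed (microblock) tip when its
        // MARF trie exists, and falls back to the canonical tip when it does not.
        format!("{http_origin}/v2/contracts/source/{principal}/{contract}?tip=latest")
    } else {
        // No tip parameter: only anchored (canonical) chain state is consulted, so
        // a contract that exists solely in microblocks is reported as not found.
        format!("{http_origin}/v2/contracts/source/{principal}/{contract}")
    }
}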
+ if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } - let tx_edge = make_contract_publish( - &spender_sk_1, - 0, - (tx_edge_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-edge", - &tx_edge_body, - ); - let tx_edge_txid = StacksTransaction::consensus_deserialize(&mut &tx_edge[..]) - .unwrap() - .txid(); + let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let spender_stacks_addr = to_addr(&spender_sk); + let spender_addr: PrincipalData = spender_stacks_addr.into(); - // something just over the limit of the expression depth - let exceeds_repeat_factor = edge_repeat_factor + 1; - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); + let (mut conf, _) = neon_integration_test_conf(); - let tx_exceeds = make_contract_publish( - &spender_sk_2, - 0, - (tx_exceeds_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-exceeds", - &tx_exceeds_body, - ); - let tx_exceeds_txid = StacksTransaction::consensus_deserialize(&mut &tx_exceeds[..]) - .unwrap() - .txid(); + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 100300, + }); - // something stupidly high over the expression depth - let high_repeat_factor = 128 * 1024; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 10_000; + conf.node.microblock_frequency = 1_000; - let tx_high = make_contract_publish( - &spender_sk_3, - 0, - (tx_high_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-high", - &tx_high_body, - ); - let tx_high_txid = StacksTransaction::consensus_deserialize(&mut &tx_high[..]) - .unwrap() - .txid(); + test_observer::spawn(); + test_observer::register_any(&mut conf); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); btc_regtest_controller.bootstrap_chain(201); @@ -9648,7 +7052,6 @@ fn test_problematic_txs_are_not_stored() { let mut run_loop = neon::RunLoop::new(conf.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); thread::spawn(move || run_loop.start(None, 0)); @@ -9664,164 +7067,207 @@ fn test_problematic_txs_are_not_stored() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - submit_tx(&http_origin, &tx_edge); - submit_tx(&http_origin, &tx_exceeds); - submit_tx(&http_origin, &tx_high); + // Let's query our first spender. 
+ let account = get_account(&http_origin, &spender_addr);
+ assert_eq!(account.balance, 100300);
+ assert_eq!(account.nonce, 0);

- // only tx_edge should be in the mempool
- assert!(get_unconfirmed_tx(&http_origin, &tx_edge_txid).is_some());
- assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_none());
- assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none());
+ // this call wakes up our node
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

- channel.stop_chains_coordinator();
-}
+ // Open chainstate.
+ // TODO (hack) instantiate the sortdb in the burnchain
+ let _ = btc_regtest_controller.sortdb_mut();
+ let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf);
+ let tip_hash =
+ StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash());
+ let (mut chainstate, _) = StacksChainState::open(
+ false,
+ CHAIN_ID_TESTNET,
+ &conf.get_chainstate_path_str(),
+ None,
+ )
+ .unwrap();

-fn find_new_files(dirp: &str, prev_files: &HashSet<String>) -> (Vec<String>, HashSet<String>) {
- let dirpp = Path::new(dirp);
- debug!("readdir {dirp}");
- let cur_files = fs::read_dir(dirp).unwrap();
- let mut new_files = vec![];
- let mut cur_files_set = HashSet::new();
- for cur_file in cur_files.into_iter() {
- let cur_file = cur_file.unwrap();
- let cur_file_fullpath = dirpp.join(cur_file.path()).to_str().unwrap().to_string();
- test_debug!("file in {dirp}: {cur_file_fullpath}");
- cur_files_set.insert(cur_file_fullpath.clone());
- if prev_files.contains(&cur_file_fullpath) {
- test_debug!("already contains {cur_file_fullpath}");
- continue;
- }
- test_debug!("new file {cur_file_fullpath}");
- new_files.push(cur_file_fullpath);
- }
- debug!(
- "Checked {dirp} for new files; found {} (all: {})",
- new_files.len(),
- cur_files_set.len()
- );
- (new_files, cur_files_set)
-}
+ // Initialize the unconfirmed state.
+ chainstate
+ .reload_unconfirmed_state(
+ &btc_regtest_controller
+ .sortdb_ref()
+ .index_handle_at_block(&chainstate, &tip_hash)
+ .unwrap(),
+ tip_hash,
+ )
+ .unwrap();

-fn spawn_follower_node(
- initial_conf: &Config,
-) -> (
- Config,
- neon::RunLoopCounter,
- PoxSyncWatchdogComms,
- CoordinatorChannels,
-) {
- let bootstrap_node_public_key = {
- let keychain = Keychain::default(initial_conf.node.seed.clone());
- let mut pk = keychain.generate_op_signer().get_public_key();
- pk.set_compressed(true);
- pk.to_hex()
- };

+ // Make microblock with two transactions. 
+ let recipient = StacksAddress::from_string(ADDR_4).unwrap();
+ let transfer_tx = make_stacks_transfer_mblock_only(
+ &spender_sk,
+ 0,
+ 1000,
+ conf.burnchain.chain_id,
+ &recipient.into(),
+ 1000,
+ );

- let (mut conf, _) = neon_integration_test_conf();
- conf.node.set_bootstrap_nodes(
- format!(
- "{}@{}",
- &bootstrap_node_public_key, initial_conf.node.p2p_bind
- ),
+ let caller_src = "
+ (define-public (execute)
+ (ok stx-liquid-supply))
+ ";
+ let publish_tx = make_contract_publish_microblock_only(
+ &spender_sk,
+ 1,
+ 1000,
 conf.burnchain.chain_id,
- conf.burnchain.peer_version,
+ "caller",
+ caller_src,
 );

- test_observer::register_any(&mut conf);
+ let tx_1 = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap();
+ let tx_2 = StacksTransaction::consensus_deserialize(&mut &publish_tx[..]).unwrap();
+ let vec_tx = vec![tx_1, tx_2];
+ let privk =
+ find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024).unwrap();
+ let iconn = btc_regtest_controller
+ .sortdb_ref()
+ .index_handle_at_block(&chainstate, &tip_hash)
+ .unwrap();
+ let mblock = make_microblock(
+ &privk,
+ &mut chainstate,
+ &iconn,
+ consensus_hash,
+ stacks_block,
+ vec_tx,
+ );
+ let mut mblock_bytes = vec![];
+ mblock.consensus_serialize(&mut mblock_bytes).unwrap();

- conf.initial_balances = initial_conf.initial_balances.clone();
- conf.burnchain.epochs = initial_conf.burnchain.epochs.clone();
- conf.burnchain.ast_precheck_size_height = initial_conf.burnchain.ast_precheck_size_height;
+ let client = reqwest::blocking::Client::new();

- conf.connection_options.inv_sync_interval = 3;
+ // Post the microblock
+ let path = format!("{http_origin}/v2/microblocks");
+ let res: String = client
+ .post(&path)
+ .header("Content-Type", "application/octet-stream")
+ .body(mblock_bytes.clone())
+ .send()
+ .unwrap()
+ .json()
+ .unwrap();

- conf.node.always_use_affirmation_maps = false;
+ assert_eq!(res, format!("{}", &mblock.block_hash()));

- let mut run_loop = neon::RunLoop::new(conf.clone());
- let blocks_processed = run_loop.get_blocks_processed_arc();
- let channel = run_loop.get_coordinator_channel().unwrap();
- let pox_sync = run_loop.get_pox_sync_comms();
+ // Wait for the microblock to be accepted
+ sleep_ms(5_000);
+ let path = format!("{http_origin}/v2/info");
+ let mut iter_count = 0;
+ loop {
+ let tip_info = client
+ .get(&path)
+ .send()
+ .unwrap()
+ .json::<RPCPeerInfoData>()
+ .unwrap();
+ eprintln!("{:#?}", tip_info);
+ if tip_info.unanchored_tip == Some(StacksBlockId([0; 32])) {
+ iter_count += 1;
+ assert!(
+ iter_count < 10,
+ "Hit retry count while waiting for net module to process pushed microblock"
+ );
+ sleep_ms(5_000);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ // Wait at least two p2p refreshes so it can produce the microblock.
+ for i in 0..30 {
+ info!(
+ "wait {} more seconds for microblock miner to find our transaction...",
+ 30 - i
+ );
+ sleep_ms(1000);
+ }
+
+ // Check event observer for new microblock event (expect 1).
+ let microblock_events = test_observer::get_microblocks();
+ assert_eq!(microblock_events.len(), 1);
+
+ // Don't set the tip parameter, and ask for the source of the contract we just defined in a microblock.
+ // This should fail because the anchored tip would be unaware of this contract. 
+ let err_opt = get_contract_src(
+ &http_origin,
+ spender_stacks_addr,
+ "caller".to_string(),
+ false,
+ );
+ match err_opt {
+ Ok(_) => {
+ panic!(
+ "Asking for the contract source off the anchored tip for a contract published \
+ only in unconfirmed state should error."
+ );
+ }
+ // Expect to get "NoSuchContract" because the function we are attempting to call is in a
+ // contract that only exists on unconfirmed state (and we did not set tip).
+ Err(err_str) => {
+ assert!(err_str.contains("No contract source data found"));
+ }
+ }

- thread::spawn(move || run_loop.start(None, 0));
+ // Set tip=latest, and ask for the source of the contract defined in the microblock.
+ // This should succeed.
+ assert!(get_contract_src(
+ &http_origin,
+ spender_stacks_addr,
+ "caller".to_string(),
+ true,
+ )
+ .is_ok());

- // Give the run loop some time to start up!
- wait_for_runloop(&blocks_processed);
+ // Mine an anchored block because now we want to have no unconfirmed state.
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

- (conf, blocks_processed, pox_sync, channel)
+ // Check that the underlying trie for the unconfirmed state does not exist.
+ assert!(chainstate.unconfirmed_state.is_some());
+ let unconfirmed_state = chainstate.unconfirmed_state.as_mut().unwrap();
+ let trie_exists = match unconfirmed_state
+ .clarity_inst
+ .trie_exists_for_block(&unconfirmed_state.unconfirmed_chain_tip)
+ {
+ Ok(res) => res,
+ Err(e) => {
+ panic!("error when determining whether or not trie exists: {:?}", e);
+ }
+ };
+ assert!(!trie_exists);
+
+ // Set tip=latest, and ask for the source of the contract defined in the previous epoch.
+ // The underlying MARF trie for the unconfirmed tip does not exist, so the transaction will be
+ // validated against the confirmed chain tip instead of the unconfirmed tip. This should be valid. 
+ assert!(get_contract_src( + &http_origin, + spender_stacks_addr, + "caller".to_string(), + true, + ) + .is_ok()); } -// TODO: test in epoch 2.1 with parser_v2 #[test] #[ignore] -fn test_problematic_blocks_are_not_mined() { +fn test_flash_block_skip_tenure() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_mined"; - if fs::metadata(bad_blocks_dir).is_ok() { - fs::remove_dir_all(bad_blocks_dir).unwrap(); - } - fs::create_dir_all(bad_blocks_dir).unwrap(); - - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); - - let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); - let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); - let spender_sk_3 = StacksPrivateKey::from_hex(SK_3).unwrap(); - let spender_stacks_addr_1 = to_addr(&spender_sk_1); - let spender_stacks_addr_2 = to_addr(&spender_sk_2); - let spender_stacks_addr_3 = to_addr(&spender_sk_3); - let spender_addr_1: PrincipalData = spender_stacks_addr_1.into(); - let spender_addr_2: PrincipalData = spender_stacks_addr_2.into(); - let spender_addr_3: PrincipalData = spender_stacks_addr_3.into(); - - let (mut conf, _) = neon_integration_test_conf(); - - conf.initial_balances.push(InitialBalance { - address: spender_addr_1, - amount: 1_000_000_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: spender_addr_2, - amount: 1_000_000_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: spender_addr_3, - amount: 1_000_000_000_000, - }); - - // force mainnet limits in 2.05 for this test - conf.burnchain.epochs = Some(EpochList::new(&[ - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 1, - block_limit: BLOCK_LIMIT_MAINNET_20.clone(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 1, - end_height: 10_002, - block_limit: BLOCK_LIMIT_MAINNET_205.clone(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 10_002, - end_height: 9223372036854775807, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ])); - conf.burnchain.pox_2_activation = Some(10_003); - - // AST precheck becomes default at burn height - conf.burnchain.ast_precheck_size_height = Some(210); - - test_observer::spawn(); - test_observer::register_any(&mut conf); + let (mut conf, miner_account) = neon_integration_test_conf(); + conf.miner.microblock_attempt_time_ms = 5_000; + conf.node.wait_time_for_microblocks = 0; let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -9831,277 +7277,118 @@ fn test_problematic_blocks_are_not_mined() { let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); let http_origin = format!("http://{}", &conf.node.rpc_bind); - // something just over the limit of the expression depth - let exceeds_repeat_factor = 32; - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - - let tx_exceeds = make_contract_publish( - &spender_sk_2, - 0, - (tx_exceeds_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-exceeds", - &tx_exceeds_body, - ); - let tx_exceeds_txid = StacksTransaction::consensus_deserialize(&mut &tx_exceeds[..]) - .unwrap() - .txid(); - - // something stupidly 
high over the expression depth - let high_repeat_factor = 3200; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); - - let tx_high = make_contract_publish( - &spender_sk_3, - 0, - (tx_high_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-high", - &tx_high_body, - ); - let tx_high_txid = StacksTransaction::consensus_deserialize(&mut &tx_high[..]) - .unwrap() - .txid(); - btc_regtest_controller.bootstrap_chain(201); eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf.clone()); + let mut run_loop = neon::RunLoop::new(conf); let blocks_processed = run_loop.get_blocks_processed_arc(); + let missed_tenures = run_loop.get_missed_tenures_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); thread::spawn(move || run_loop.start(None, 0)); - // Give the run loop some time to start up! + // give the run loop some time to start up! wait_for_runloop(&blocks_processed); - // First block wakes up the run loop. + // first block wakes up the run loop next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // Second block will hold our VRF registration. + // first block will hold our VRF registration next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // Third block will be the first mined Stacks block. + // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); - submit_tx(&http_origin, &tx_exceeds); - assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; - - for _i in 0..5 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; - } - - // all blocks were processed - wait_for(30, || { - let tip_info = get_chain_info(&conf); - Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) - }) - .expect("Failed waiting for blocks to be processed"); - - // no blocks considered problematic - assert!(all_new_files.is_empty()); + // fault injection: force tenures to take too long + std::env::set_var("STX_TEST_SLOW_TENURE", "11000"); - // one block contained tx_exceeds - let blocks = test_observer::get_blocks(); - let mut found = false; - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(..) 
= &parsed.payload { - if parsed.txid() == tx_exceeds_txid { - found = true; - break; - } - } - } + for i in 0..10 { + // build one bitcoin block every 10 seconds + eprintln!("Build bitcoin block +{i}"); + btc_regtest_controller.build_next_block(1); + sleep_ms(10000); } - assert!(found); - - let (tip, cur_ast_rules) = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) - }; - - assert_eq!(cur_ast_rules, ASTRules::Typical); - - // add another bad tx to the mempool - debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); - submit_tx(&http_origin, &tx_high); - assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - - btc_regtest_controller.build_next_block(1); - - // wait for runloop to advance - wait_for(30, || { - let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - Ok(new_tip.block_height > tip.block_height) - }) - .expect("Failed waiting for blocks to be processed"); + // at least one tenure was skipped + let num_skipped = missed_tenures.load(Ordering::SeqCst); + eprintln!("Skipped {num_skipped} tenures"); + assert!(num_skipped > 1); - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; + // let's query the miner's account nonce: - // new rules took effect - assert_eq!(cur_ast_rules, ASTRules::PrecheckSize); + eprintln!("Miner account: {miner_account}"); - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; + let account = get_account(&http_origin, &miner_account); + eprintln!("account = {account:?}"); + assert_eq!(account.balance, 0); + assert_eq!(account.nonce, 2); - eprintln!("old_tip_info = {old_tip_info:?}"); + channel.stop_chains_coordinator(); +} - // mine some blocks, and log problematic blocks - for _i in 0..6 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; - } +#[test] +#[ignore] +fn test_chainwork_first_intervals() { + let (conf, _) = neon_integration_test_conf(); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); - // all blocks were processed - wait_for(30, || { - let tip_info = get_chain_info(&conf); - Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) - }) - .expect("Failed waiting for blocks to be processed"); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - // none were problematic - assert!(all_new_files.is_empty()); + btc_regtest_controller.bootstrap_chain(2016 * 2 - 1); - // recently-submitted problematic transactions are not in the mempool - // (but old ones that were 
already mined, and thus never considered, could still be present) - test_debug!("Problematic tx {tx_high_txid} should be dropped"); - assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); + eprintln!("Chain bootstrapped..."); - // no block contained the tx_high bad transaction, ever - let blocks = test_observer::get_blocks(); - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(..) = &parsed.payload { - assert!(parsed.txid() != tx_high_txid); - } - } - } + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); - let new_tip_info = get_chain_info(&conf); + let channel = run_loop.get_coordinator_channel().unwrap(); - eprintln!("\nBooting follower\n"); + thread::spawn(move || run_loop.start(None, 0)); - // verify that a follower node that boots up with this node as a bootstrap peer will process - // all of the blocks available, even if they are problematic, with the checks on. - let (follower_conf, _, pox_sync_comms, follower_channel) = spawn_follower_node(&conf); + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + channel.stop_chains_coordinator(); +} - eprintln!( - "\nFollower booted on port {},{}\n", - follower_conf.node.p2p_bind, follower_conf.node.rpc_bind - ); +#[test] +#[ignore] +fn test_chainwork_partial_interval() { + let (conf, _) = neon_integration_test_conf(); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); - // Do not unwrap in case we were just slow - let _ = wait_for(300, || { - let follower_tip_info = get_chain_info(&follower_conf); - eprintln!( - "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); - Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) - }); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - // make sure we aren't just slow -- wait for the follower to do a few download passes - let num_download_passes = pox_sync_comms.get_download_passes(); - eprintln!( - "\nFollower has performed {num_download_passes} download passes; wait for {}\n", - num_download_passes + 5 - ); + btc_regtest_controller.bootstrap_chain(2016 - 1); - wait_for(30, || { - let download_passes = pox_sync_comms.get_download_passes(); - eprintln!( - "\nFollower has performed {download_passes} download passes; wait for {}\n", - num_download_passes + 5 - ); - Ok(download_passes >= num_download_passes + 5) - }) - .expect("Failed waiting for follower to perform enough download passes"); + eprintln!("Chain bootstrapped..."); - eprintln!( - "\nFollower has performed {} download passes\n", - pox_sync_comms.get_download_passes() - ); + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); - let follower_tip_info = get_chain_info(&follower_conf); - eprintln!( - "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); + let channel = run_loop.get_coordinator_channel().unwrap(); - assert_eq!( - 
follower_tip_info.stacks_tip_height, - new_tip_info.stacks_tip_height - ); + thread::spawn(move || run_loop.start(None, 0)); - test_observer::clear(); + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); channel.stop_chains_coordinator(); - follower_channel.stop_chains_coordinator(); } -// TODO: test in epoch 2.1 with parser_v2 #[test] #[ignore] -fn test_problematic_blocks_are_not_relayed_or_stored() { +fn test_problematic_txs_are_not_stored() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_relayed_or_stored"; - if fs::metadata(bad_blocks_dir).is_ok() { - fs::remove_dir_all(bad_blocks_dir).unwrap(); - } - fs::create_dir_all(bad_blocks_dir).unwrap(); - - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); - let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_sk_3 = StacksPrivateKey::from_hex(SK_3).unwrap(); @@ -10153,13 +7440,14 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { ])); conf.burnchain.pox_2_activation = Some(10_003); - // AST precheck becomes default at burn height - conf.burnchain.ast_precheck_size_height = Some(210); + // take effect immediately + conf.burnchain.ast_precheck_size_height = Some(0); test_observer::spawn(); test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); @@ -10167,306 +7455,177 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); let http_origin = format!("http://{}", &conf.node.rpc_bind); - // something just over the limit of the expression depth - let exceeds_repeat_factor = 32; - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - - let tx_exceeds = make_contract_publish( - &spender_sk_2, - 0, - (tx_exceeds_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-exceeds", - &tx_exceeds_body, - ); - let tx_exceeds_txid = StacksTransaction::consensus_deserialize(&mut &tx_exceeds[..]) - .unwrap() - .txid(); - - let high_repeat_factor = 70; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); + // something at the limit of the expression depth (will get mined and processed) + let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; + let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); + let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); + let tx_edge_body = format!("{tx_edge_body_start}u1 {tx_edge_body_end}"); - let tx_high = make_contract_publish( - &spender_sk_3, + let tx_edge = make_contract_publish( + &spender_sk_1, 0, - (tx_high_body.len() * 100) as u64, + (tx_edge_body.len() * 100) as u64, conf.burnchain.chain_id, - "test-high", - &tx_high_body, - ); - let tx_high_txid = StacksTransaction::consensus_deserialize(&mut &tx_high[..]) - .unwrap() - .txid(); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let 
blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // Give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // First block wakes up the run loop. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Second block will hold our VRF registration. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Third block will be the first mined Stacks block. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); - submit_tx(&http_origin, &tx_exceeds); - assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; - - for _i in 0..5 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; - } - - let tip_info = get_chain_info(&conf); - - // blocks were all processed - assert_eq!( - tip_info.stacks_tip_height, - old_tip_info.stacks_tip_height + 5 - ); - // no blocks considered problematic - assert!(all_new_files.is_empty()); - - // one block contained tx_exceeds - let blocks = test_observer::get_blocks(); - let mut found = false; - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(..) 
= &parsed.payload { - if parsed.txid() == tx_exceeds_txid { - found = true; - break; - } - } - } - } - - assert!(found); + "test-edge", + &tx_edge_body, + ); + let tx_edge_txid = StacksTransaction::consensus_deserialize(&mut &tx_edge[..]) + .unwrap() + .txid(); - let (tip, cur_ast_rules) = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) - }; + // something just over the limit of the expression depth + let exceeds_repeat_factor = edge_repeat_factor + 1; + let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - assert_eq!(cur_ast_rules, ASTRules::Typical); + let tx_exceeds = make_contract_publish( + &spender_sk_2, + 0, + (tx_exceeds_body.len() * 100) as u64, + conf.burnchain.chain_id, + "test-exceeds", + &tx_exceeds_body, + ); + let tx_exceeds_txid = StacksTransaction::consensus_deserialize(&mut &tx_exceeds[..]) + .unwrap() + .txid(); - btc_regtest_controller.build_next_block(1); + // something stupidly high over the expression depth + let high_repeat_factor = 128 * 1024; + let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); + let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); - // wait for runloop to advance - loop { - sleep_ms(1_000); - let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - if new_tip.block_height > tip.block_height { - break; - } - } - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; + let tx_high = make_contract_publish( + &spender_sk_3, + 0, + (tx_high_body.len() * 100) as u64, + conf.burnchain.chain_id, + "test-high", + &tx_high_body, + ); + let tx_high_txid = StacksTransaction::consensus_deserialize(&mut &tx_high[..]) + .unwrap() + .txid(); - // new rules took effect - assert_eq!(cur_ast_rules, ASTRules::PrecheckSize); + btc_regtest_controller.bootstrap_chain(201); - // the follower we will soon boot up will start applying the new AST rules at this height. 
- // Make it so the miner does *not* follow the rules - { - let sortdb = btc_regtest_controller.sortdb_mut(); - let mut tx = sortdb.tx_begin().unwrap(); - SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 10_000).unwrap(); - tx.commit().unwrap(); - } - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; + eprintln!("Chain bootstrapped..."); - // we reverted to the old rules (but the follower won't) - assert_eq!(cur_ast_rules, ASTRules::Typical); + let mut run_loop = neon::RunLoop::new(conf.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); - // add another bad tx to the mempool. - // because the miner is now non-conformant, it should mine this tx. - debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); - submit_tx(&http_origin, &tx_high); - assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + thread::spawn(move || run_loop.start(None, 0)); - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; + // Give the run loop some time to start up! + wait_for_runloop(&blocks_processed); - eprintln!("old_tip_info = {old_tip_info:?}"); + // First block wakes up the run loop. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // mine some blocks, and log problematic blocks - for _i in 0..6 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; + // Second block will hold our VRF registration. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let cur_ast_rules = - SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; + // Third block will be the first mined Stacks block. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // we reverted to the old rules (but the follower won't) - assert_eq!(cur_ast_rules, ASTRules::Typical); - } + submit_tx(&http_origin, &tx_edge); + submit_tx(&http_origin, &tx_exceeds); + submit_tx(&http_origin, &tx_high); - let tip_info = get_chain_info(&conf); + // only tx_edge should be in the mempool + assert!(get_unconfirmed_tx(&http_origin, &tx_edge_txid).is_some()); + assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_none()); + assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); - // at least one block was mined (hard to say how many due to the raciness between the burnchain - // downloader and this thread). - info!( - "tip_info.stacks_tip_height = {}, old_tip_info.stacks_tip_height = {}", - tip_info.stacks_tip_height, old_tip_info.stacks_tip_height - ); - assert!(tip_info.stacks_tip_height > old_tip_info.stacks_tip_height); - // one was problematic -- i.e. 
the one that included tx_high
- assert_eq!(all_new_files.len(), 1);
+ channel.stop_chains_coordinator();
+}

- // tx_high got mined by the miner
- let blocks = test_observer::get_blocks();
- let mut bad_block_height = None;
- for block in blocks {
- let transactions = block.get("transactions").unwrap().as_array().unwrap();
- for tx in transactions.iter() {
- let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap();
- if raw_tx == "0x00" {
- continue;
- }
- let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
- let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
- if let TransactionPayload::SmartContract(..) = &parsed.payload {
- if parsed.txid() == tx_high_txid {
- bad_block_height = Some(block.get("block_height").unwrap().as_u64().unwrap());
- }
- }
+fn find_new_files(dirp: &str, prev_files: &HashSet<String>) -> (Vec<String>, HashSet<String>) {
+ let dirpp = Path::new(dirp);
+ debug!("readdir {dirp}");
+ let cur_files = fs::read_dir(dirp).unwrap();
+ let mut new_files = vec![];
+ let mut cur_files_set = HashSet::new();
+ for cur_file in cur_files.into_iter() {
+ let cur_file = cur_file.unwrap();
+ let cur_file_fullpath = dirpp.join(cur_file.path()).to_str().unwrap().to_string();
+ test_debug!("file in {dirp}: {cur_file_fullpath}");
+ cur_files_set.insert(cur_file_fullpath.clone());
+ if prev_files.contains(&cur_file_fullpath) {
+ test_debug!("already contains {cur_file_fullpath}");
+ continue;
 }
+ test_debug!("new file {cur_file_fullpath}");
+ new_files.push(cur_file_fullpath);
 }
- assert!(bad_block_height.is_some());
- let bad_block_height = bad_block_height.unwrap();
-
- // follower should not process bad_block_height or higher
- let new_tip_info = get_chain_info(&conf);
-
- eprintln!("\nBooting follower\n");
+ debug!(
+ "Checked {dirp} for new files; found {} (all: {})",
+ new_files.len(),
+ cur_files_set.len()
+ );
+ (new_files, cur_files_set)
+}

- // verify that a follower node that boots up with this node as a bootstrap peer will process
- // all of the blocks available, even if they are problematic, with the checks on. 
- let (follower_conf, _, pox_sync_comms, follower_channel) = spawn_follower_node(&conf); +fn spawn_follower_node( + initial_conf: &Config, +) -> ( + Config, + neon::RunLoopCounter, + PoxSyncWatchdogComms, + CoordinatorChannels, +) { + let bootstrap_node_public_key = { + let keychain = Keychain::default(initial_conf.node.seed.clone()); + let mut pk = keychain.generate_op_signer().get_public_key(); + pk.set_compressed(true); + pk.to_hex() + }; - eprintln!( - "\nFollower booted on port {},{}\n", - follower_conf.node.p2p_bind, follower_conf.node.rpc_bind + let (mut conf, _) = neon_integration_test_conf(); + conf.node.set_bootstrap_nodes( + format!( + "{}@{}", + &bootstrap_node_public_key, initial_conf.node.p2p_bind + ), + conf.burnchain.chain_id, + conf.burnchain.peer_version, ); - let deadline = get_epoch_time_secs() + 300; - while get_epoch_time_secs() < deadline { - let follower_tip_info = get_chain_info(&follower_conf); - if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height - || follower_tip_info.stacks_tip_height + 1 == bad_block_height - { - break; - } - eprintln!( - "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); - sleep_ms(1000); - } + test_observer::register_any(&mut conf); - // make sure we aren't just slow -- wait for the follower to do a few download passes - let num_download_passes = pox_sync_comms.get_download_passes(); - eprintln!( - "\nFollower has performed {num_download_passes} download passes; wait for {}\n", - num_download_passes + 5 - ); + conf.initial_balances = initial_conf.initial_balances.clone(); + conf.burnchain.epochs = initial_conf.burnchain.epochs.clone(); + conf.burnchain.ast_precheck_size_height = initial_conf.burnchain.ast_precheck_size_height; - while num_download_passes + 5 > pox_sync_comms.get_download_passes() { - sleep_ms(1000); - eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - pox_sync_comms.get_download_passes(), - num_download_passes + 5 - ); - } + conf.connection_options.inv_sync_interval = 3; - eprintln!( - "\nFollower has performed {} download passes\n", - pox_sync_comms.get_download_passes() - ); + conf.node.always_use_affirmation_maps = false; + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); + let pox_sync = run_loop.get_pox_sync_comms(); - let follower_tip_info = get_chain_info(&follower_conf); - eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); + thread::spawn(move || run_loop.start(None, 0)); - // follower rejects the bad block - assert_eq!(follower_tip_info.stacks_tip_height, bad_block_height - 1); + // Give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); - test_observer::clear(); - channel.stop_chains_coordinator(); - follower_channel.stop_chains_coordinator(); + (conf, blocks_processed, pox_sync, channel) } // TODO: test in epoch 2.1 with parser_v2 #[test] #[ignore] -fn test_problematic_microblocks_are_not_mined() { +fn test_problematic_blocks_are_not_mined() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_mined"; + let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_mined"; if fs::metadata(bad_blocks_dir).is_ok() { fs::remove_dir_all(bad_blocks_dir).unwrap(); } @@ -10528,12 +7687,6 @@ fn test_problematic_microblocks_are_not_mined() { // AST precheck becomes default at burn height conf.burnchain.ast_precheck_size_height = Some(210); - // mine microblocks - conf.node.mine_microblocks = true; - conf.node.microblock_frequency = 1_000; - conf.miner.microblock_attempt_time_ms = 1_000; - conf.node.wait_time_for_microblocks = 0; - test_observer::spawn(); test_observer::register_any(&mut conf); @@ -10551,7 +7704,7 @@ fn test_problematic_microblocks_are_not_mined() { let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - let tx_exceeds = make_contract_publish_microblock_only( + let tx_exceeds = make_contract_publish( &spender_sk_2, 0, (tx_exceeds_body.len() * 100) as u64, @@ -10564,12 +7717,12 @@ fn test_problematic_microblocks_are_not_mined() { .txid(); // something stupidly high over the expression depth - let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = 3200; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); - let tx_high = make_contract_publish_microblock_only( + let tx_high = make_contract_publish( &spender_sk_3, 0, (tx_high_body.len() * 100) as u64, @@ -10603,12 +7756,11 @@ fn test_problematic_microblocks_are_not_mined() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - info!("Submitted problematic tx_exceeds transaction {tx_exceeds_txid}"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10620,26 +7772,23 @@ fn test_problematic_microblocks_are_not_mined() { let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); all_new_files.append(&mut new_files); cur_files = cur_files_new; - - // give the microblock miner a chance - sleep_ms(5_000); } - // microblocks and blocks were all processed + // all blocks were processed wait_for(30, || { let tip_info = get_chain_info(&conf); Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) }) - .expect("Failed waiting for microblocks to be processed"); + .expect("Failed waiting for blocks to be processed"); - // no microblocks considered problematic + // no blocks considered problematic assert!(all_new_files.is_empty()); - // one microblock contained tx_exceeds - let microblocks = test_observer::get_microblocks(); + // one block contained tx_exceeds + let blocks = test_observer::get_blocks(); let mut found = false; - for microblock in microblocks { - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -10669,15 +7818,13 @@ fn test_problematic_microblocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - info!("Submit problematic tx_high transaction {tx_high_txid}"); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - info!("Submitted problematic tx_high transaction {tx_high_txid}"); btc_regtest_controller.build_next_block(1); - info!("Mined block after submitting problematic tx_high transaction {tx_high_txid}"); // wait for runloop to advance wait_for(30, || { @@ -10685,7 +7832,7 @@ fn test_problematic_microblocks_are_not_mined() { let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) - .expect("Failed waiting for runloop to advance"); + .expect("Failed waiting for blocks to be processed"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); @@ -10704,24 +7851,21 @@ fn test_problematic_microblocks_are_not_mined() { eprintln!("old_tip_info = {old_tip_info:?}"); - // mine some microblocks, and log problematic microblocks + // mine some blocks, and log problematic blocks for _i in 0..6 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); all_new_files.append(&mut new_files); cur_files = cur_files_new; - - 
// give the microblock miner a chance - sleep_ms(5_000); } - // sleep a little longer before checking tip info; this should help with test flakiness + // all blocks were processed wait_for(30, || { let tip_info = get_chain_info(&conf); Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) }) - .expect("Failed waiting for microblocks to be processed"); + .expect("Failed waiting for blocks to be processed"); // none were problematic assert!(all_new_files.is_empty()); @@ -10731,10 +7875,10 @@ fn test_problematic_microblocks_are_not_mined() { test_debug!("Problematic tx {tx_high_txid} should be dropped"); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); - // no microblock contained the tx_high bad transaction, ever - let microblocks = test_observer::get_microblocks(); - for microblock in microblocks { - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); + // no block contained the tx_high bad transaction, ever + let blocks = test_observer::get_blocks(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -10743,7 +7887,7 @@ fn test_problematic_microblocks_are_not_mined() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(..) = &parsed.payload { - assert_ne!(parsed.txid(), tx_high_txid); + assert!(parsed.txid() != tx_high_txid); } } } @@ -10761,12 +7905,12 @@ fn test_problematic_microblocks_are_not_mined() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - // Do not unwrap as we may just be slow + // Do not unwrap in case we were just slow let _ = wait_for(300, || { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) }); @@ -10812,12 +7956,12 @@ fn test_problematic_microblocks_are_not_mined() { // TODO: test in epoch 2.1 with parser_v2 #[test] #[ignore] -fn test_problematic_microblocks_are_not_relayed_or_stored() { +fn test_problematic_blocks_are_not_relayed_or_stored() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_relayed_or_stored"; + let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_relayed_or_stored"; if fs::metadata(bad_blocks_dir).is_ok() { fs::remove_dir_all(bad_blocks_dir).unwrap(); } @@ -10879,14 +8023,6 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // AST precheck becomes default at burn height conf.burnchain.ast_precheck_size_height = Some(210); - // mine microblocks - conf.node.mine_microblocks = true; - conf.node.microblock_frequency = 1_000; - conf.miner.microblock_attempt_time_ms = 1_000; - conf.node.wait_time_for_microblocks = 0; - - conf.connection_options.inv_sync_interval = 3; - test_observer::spawn(); test_observer::register_any(&mut conf); @@ -10904,7 +8040,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - let tx_exceeds = 
make_contract_publish_microblock_only( + let tx_exceeds = make_contract_publish( &spender_sk_2, 0, (tx_exceeds_body.len() * 100) as u64, @@ -10916,13 +8052,12 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { .unwrap() .txid(); - // greatly exceeds AST depth, but is still mineable without a stack overflow - let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = 70; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); - let tx_high = make_contract_publish_microblock_only( + let tx_high = make_contract_publish( &spender_sk_3, 0, (tx_high_body.len() * 100) as u64, @@ -10972,26 +8107,23 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); all_new_files.append(&mut new_files); cur_files = cur_files_new; - - // give the microblock miner a chance - sleep_ms(5_000); } - // microblocks and blocks were all processed - wait_for(30, || { - let tip_info = get_chain_info(&conf); - Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) - }) - .expect("Failed waiting for microblocks to be processed"); + let tip_info = get_chain_info(&conf); - // no microblocks considered problematic + // blocks were all processed + assert_eq!( + tip_info.stacks_tip_height, + old_tip_info.stacks_tip_height + 5 + ); + // no blocks considered problematic assert!(all_new_files.is_empty()); - // one microblock contained tx_exceeds - let microblocks = test_observer::get_microblocks(); + // one block contained tx_exceeds + let blocks = test_observer::get_blocks(); let mut found = false; - for microblock in microblocks { - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -11023,13 +8155,14 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { btc_regtest_controller.build_next_block(1); // wait for runloop to advance - wait_for(30, || { + loop { + sleep_ms(1_000); let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - Ok(new_tip.block_height > tip.block_height) - }) - .expect("Failed waiting for runloop to advance"); - + if new_tip.block_height > tip.block_height { + break; + } + } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -11063,7 +8196,6 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. 
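// ("non-conformant" here means this node's AST rules were overridden back to
// `ASTRules::Typical` -- see the `cur_ast_rules` assertions around this point --
// so the depth-exceeding `tx_high` contract parses under the old rules and no
// longer needs the `STACKS_DISABLE_TX_PROBLEMATIC_CHECK` escape hatch, while
// followers still enforcing `ASTRules::PrecheckSize` will reject the block
// carrying it.)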
debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); @@ -11075,7 +8207,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { eprintln!("old_tip_info = {old_tip_info:?}"); - // mine some blocks, and log problematic microblocks + // mine some blocks, and log problematic blocks for _i in 0..6 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); @@ -11093,28 +8225,25 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // we reverted to the old rules (but the follower won't) assert_eq!(cur_ast_rules, ASTRules::Typical); - - // give the microblock miner a chance - sleep_ms(5_000); } - // sleep a little longer before checking tip info; this should help with test flakiness - wait_for(30, || { - let tip_info = get_chain_info(&conf); - Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) - }) - .expect("Failed waiting for microblocks to be processed"); + let tip_info = get_chain_info(&conf); - // at least one was problematic. - // the miner might make multiple microblocks (only some of which are confirmed), so also check - // the event observer to see that we actually picked up tx_high - assert!(!all_new_files.is_empty()); + // at least one block was mined (hard to say how many due to the raciness between the burnchain + // downloader and this thread). + info!( + "tip_info.stacks_tip_height = {}, old_tip_info.stacks_tip_height = {}", + tip_info.stacks_tip_height, old_tip_info.stacks_tip_height + ); + assert!(tip_info.stacks_tip_height > old_tip_info.stacks_tip_height); + // one was problematic -- i.e. the one that included tx_high + assert_eq!(all_new_files.len(), 1); // tx_high got mined by the miner - let microblocks = test_observer::get_microblocks(); - let mut bad_block_id = None; - for microblock in microblocks { - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); + let blocks = test_observer::get_blocks(); + let mut bad_block_height = None; + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -11124,26 +8253,13 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(..) 
= &parsed.payload { if parsed.txid() == tx_high_txid { - bad_block_id = { - let parts: Vec<_> = microblock - .get("parent_index_block_hash") - .unwrap() - .as_str() - .unwrap() - .split("0x") - .collect(); - let bad_block_id_hex = parts[1]; - debug!("bad_block_id_hex = '{bad_block_id_hex}'"); - Some(StacksBlockId::from_hex(bad_block_id_hex).unwrap()) - }; + bad_block_height = Some(block.get("block_height").unwrap().as_u64().unwrap()); } } } } - assert!(bad_block_id.is_some()); - let bad_block_id = bad_block_id.unwrap(); - let bad_block = get_block(&http_origin, &bad_block_id).unwrap(); - let bad_block_height = bad_block.header.total_work.work; + assert!(bad_block_height.is_some()); + let bad_block_height = bad_block_height.unwrap(); // follower should not process bad_block_height or higher let new_tip_info = get_chain_info(&conf); @@ -11159,15 +8275,20 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - // Do not unwrap as we may just be slow - let _ = wait_for(300, || { + let deadline = get_epoch_time_secs() + 300; + while get_epoch_time_secs() < deadline { let follower_tip_info = get_chain_info(&follower_conf); + if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height + || follower_tip_info.stacks_tip_height + 1 == bad_block_height + { + break; + } eprintln!( - "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, + "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); - Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) - }); + sleep_ms(1000); + } // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); @@ -11176,15 +8297,15 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { num_download_passes + 5 ); - wait_for(30, || { - let download_passes = pox_sync_comms.get_download_passes(); + while num_download_passes + 5 > pox_sync_comms.get_download_passes() { + sleep_ms(1000); eprintln!( - "\nFollower has performed {download_passes} download passes; wait for {}\n", + "\nFollower has performed {} download passes; wait for {}\n", + pox_sync_comms.get_download_passes(), num_download_passes + 5 ); - Ok(download_passes >= num_download_passes + 5) - }) - .expect("Failed waiting for follower to perform enough download passes"); + } + eprintln!( "\nFollower has performed {} download passes\n", pox_sync_comms.get_download_passes() @@ -11196,8 +8317,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); - // follower rejects the bad microblock -- can't append subsequent blocks - assert_eq!(follower_tip_info.stacks_tip_height, bad_block_height); + // follower rejects the bad block + assert_eq!(follower_tip_info.stacks_tip_height, bad_block_height - 1); test_observer::clear(); channel.stop_chains_coordinator(); @@ -11461,38 +8582,6 @@ pub fn make_random_tx_chain( chain } -fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec<Vec<u8>> { - let addr = to_addr(privk); - let mut chain = vec![]; - - for nonce in 0..25 { - // N.B. 
private keys are 32-33 bytes, so this is always safe - let random_iters = privk.to_bytes()[nonce as usize] as usize; - - let be_bytes = [ - privk.to_bytes()[nonce as usize], - privk.to_bytes()[(nonce + 1) as usize], - ]; - - let random_extra_fee = u16::from_be_bytes(be_bytes) as u64; - - let mut addr_prefix = addr.to_string(); - let _ = addr_prefix.split_off(12); - let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}"); - eprintln!("Make tx {contract_name}"); - let tx = make_contract_publish_microblock_only( - privk, - nonce, - 1049230 + nonce + fee_plus + random_extra_fee, - chain_id, - &contract_name, - &make_runtime_sized_contract(1, nonce, &addr_prefix), - ); - chain.push(tx); - } - chain -} - fn test_competing_miners_build_on_same_chain( num_miners: usize, conf_template: Config, @@ -11727,114 +8816,6 @@ fn test_competing_miners_build_anchor_blocks_on_same_chain_without_rbf() { test_competing_miners_build_on_same_chain(5, conf, false, 10_000, TxChainStrategy::Expensive) } -// TODO: this needs to run as a smoke test, since they take too long to run in CI -#[test] -#[ignore] -fn test_competing_miners_build_anchor_blocks_and_microblocks_on_same_chain() { - let (mut conf, _) = neon_integration_test_conf(); - - conf.node.mine_microblocks = true; - conf.miner.microblock_attempt_time_ms = 2_000; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 0; - conf.miner.first_attempt_time_ms = 2_000; - conf.miner.subsequent_attempt_time_ms = 5_000; - conf.burnchain.max_rbf = 1000000; - conf.node.wait_time_for_blocks = 1_000; - - test_competing_miners_build_on_same_chain(5, conf, true, 15_000, TxChainStrategy::Random) -} - -#[test] -#[ignore] -fn microblock_miner_multiple_attempts() { - let (mut conf, miner_account) = neon_integration_test_conf(); - let chain_id = conf.burnchain.chain_id; - - conf.node.mine_microblocks = true; - conf.miner.microblock_attempt_time_ms = 2_000; - conf.node.wait_time_for_microblocks = 100; - conf.node.microblock_frequency = 100; - conf.miner.first_attempt_time_ms = 2_000; - conf.miner.subsequent_attempt_time_ms = 5_000; - conf.burnchain.max_rbf = 1000000; - conf.node.wait_time_for_blocks = 1_000; - - let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::random()).collect(); - let balances: Vec<_> = privks - .iter() - .map(|privk| { - let addr = to_addr(privk); - InitialBalance { - address: addr.into(), - amount: 1_000_000_000, - } - }) - .collect(); - - conf.initial_balances = balances; - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // let's query the miner's account nonce: - - let account = get_account(&http_origin, &miner_account); - eprintln!("Miner account: {account:?}"); - - let all_txs: Vec<_> = privks - .iter() - .enumerate() - .map(|(i, pk)| make_mblock_tx_chain(pk, (25 * i) as u64, chain_id)) - .collect(); - - let _handle = thread::spawn(move || { - for (i, txi) in all_txs.iter().enumerate() { - for (j, tx) in txi.iter().enumerate() { - eprintln!("\n\nSubmit tx {i},{j}\n\n"); - submit_tx(&http_origin, tx); - sleep_ms(1_000); - } - } - }); - - for _i in 0..10 { - sleep_ms(30_000); - btc_regtest_controller.build_next_block(1); - } - - channel.stop_chains_coordinator(); -} - #[test] #[ignore] fn min_txs() { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 185335ce1ed..4f9ee34f37c 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -32,10 +32,11 @@ use stacks::chainstate::nakamoto::NakamotoBlock; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::StacksPrivateKey; use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; +use stacks::core::test_util::{make_contract_call, make_contract_publish, make_stacks_transfer}; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; -use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; +use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPublicKey}; use stacks::types::PrivateKey; use stacks::util::get_epoch_time_secs; use stacks::util::hash::MerkleHashFunc; @@ -47,9 +48,13 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_signer::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; +use stacks_signer::v0::signer_state::{LocalStateMachine, MinerState}; use stacks_signer::{Signer, SpawnedSigner}; -use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; +use super::nakamoto_integrations::{ + check_nakamoto_empty_block_heuristics, next_block_and, wait_for, +}; +use super::neon_integrations::{get_account, get_sortition_info_ch, submit_tx_fallible}; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; @@ -209,13 +214,11 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> pub fn send_status_request(&self, exclude: &HashSet<usize>) { - for signer_ix in 0..self.spawned_signers.len() { + for (signer_ix, signer_config) in self.signer_configs.iter().enumerate() { if exclude.contains(&signer_ix) { continue; } - let port = 3000 + signer_ix; - let endpoint = format!("http://localhost:{port}"); - let path = format!("{endpoint}/status"); + let path = format!("http://{}/status", signer_config.endpoint); debug!("Issue status request to {path}"); let client = reqwest::blocking::Client::new(); @@ -227,9 +230,9 @@ 
impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> info.burn_block_height) + }) + .unwrap(); + } + + /// Fetch the local signer state machine for all the signers, + /// waiting until every signer has processed the latest burn block. + /// Then, check that every signer's state machine corresponds to the + /// latest burn block: + /// 1. Having a valid sortition + /// 2. The active miner is the winner of that sortition + /// 3. The active miner is building off of the prior tenure + pub fn check_signer_states_normal(&mut self) { + let (state_machines, info_cur) = self.get_burn_updated_states(); + + let sortition_latest = + get_sortition_info_ch(&self.running_nodes.conf, &info_cur.pox_consensus); + let sortition_prior = get_sortition_info_ch( + &self.running_nodes.conf, + sortition_latest.last_sortition_ch.as_ref().unwrap(), + ); + + info!("Latest sortition: {sortition_latest:?}"); + info!("Prior sortition: {sortition_prior:?}"); + + assert_eq!( + sortition_latest.last_sortition_ch, + sortition_latest.stacks_parent_ch + ); + let latest_block = self + .stacks_client + .get_tenure_tip(&sortition_prior.consensus_hash) + .unwrap(); + let latest_block_id = + StacksBlockId::new(&sortition_prior.consensus_hash, &latest_block.block_hash()); + + state_machines + .into_iter() + .enumerate() + .for_each(|(ix, state_machine)| { + let LocalStateMachine::Initialized(state_machine) = state_machine else { + error!("Local state machine was not initialized"); + panic!(); + }; + + info!("Evaluating Signer #{ix}"; "state_machine" => ?state_machine); + + assert_eq!(state_machine.burn_block, info_cur.pox_consensus,); + assert_eq!(state_machine.burn_block_height, info_cur.burn_block_height,); + let MinerState::ActiveMiner { + current_miner_pkh, + parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + .. + } = state_machine.current_miner + else { + error!("State machine for Signer #{ix} did not have an active miner"); + panic!(); + }; + assert_eq!(Some(current_miner_pkh), sortition_latest.miner_pk_hash160); + assert_eq!(parent_tenure_id, sortition_prior.consensus_hash); + assert_eq!(parent_tenure_last_block, latest_block_id); + assert_eq!(parent_tenure_last_block_height, latest_block.height()); + }); + } + + /// Fetch the local signer state machine for all the signers, + /// waiting until every signer has processed the latest burn block. + /// Then, check that every signer's state machine corresponds to the + /// latest burn block: + /// 1. Having an invalid miner + /// 2. 
The active miner is the winner of the prior sortition + pub fn check_signer_states_revert_to_prior(&mut self) { + let (state_machines, info_cur) = self.get_burn_updated_states(); + + let sortition_latest = + get_sortition_info_ch(&self.running_nodes.conf, &info_cur.pox_consensus); + let sortition_prior = get_sortition_info_ch( + &self.running_nodes.conf, + sortition_latest.last_sortition_ch.as_ref().unwrap(), + ); + + info!("Latest sortition: {sortition_latest:?}"); + info!("Prior sortition: {sortition_prior:?}"); + + let latest_block = self + .stacks_client + .get_tenure_tip(sortition_prior.stacks_parent_ch.as_ref().unwrap()) + .unwrap(); + let latest_block_id = StacksBlockId::new( + sortition_prior.stacks_parent_ch.as_ref().unwrap(), + &latest_block.block_hash(), + ); + + state_machines + .into_iter() + .enumerate() + .for_each(|(ix, state_machine)| { + let LocalStateMachine::Initialized(state_machine) = state_machine else { + error!("Local state machine was not initialized"); + panic!(); + }; + + info!("Evaluating Signer #{ix}"; "state_machine" => ?state_machine); + + assert_eq!(state_machine.burn_block, info_cur.pox_consensus,); + assert_eq!(state_machine.burn_block_height, info_cur.burn_block_height,); + let MinerState::ActiveMiner { + current_miner_pkh, + parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + tenure_id, + } = state_machine.current_miner + else { + error!("State machine for Signer #{ix} did not have an active miner"); + panic!(); + }; + assert_eq!(tenure_id, sortition_prior.consensus_hash); + assert_eq!(Some(current_miner_pkh), sortition_prior.miner_pk_hash160); + assert_eq!(Some(parent_tenure_id), sortition_prior.stacks_parent_ch); + assert_eq!(parent_tenure_last_block, latest_block_id); + assert_eq!(parent_tenure_last_block_height, latest_block.height()); + }); + } + + /// Submit a stacks transfer just to trigger block production + pub fn submit_transfer_tx( + &mut self, + sender_sk: &StacksPrivateKey, + send_fee: u64, + send_amt: u64, + ) -> Result<(String, u64), String> { + let http_origin = format!("http://{}", &self.running_nodes.conf.node.rpc_bind); + let sender_addr = to_addr(&sender_sk); + let sender_nonce = get_account(&http_origin, &sender_addr).nonce; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + self.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx_fallible(&http_origin, &transfer_tx).map(|resp| (resp, sender_nonce)) + } + + /// Submit a burn block dependent contract for publishing + /// and wait until it is included in a block + pub fn submit_burn_block_contract_and_wait( + &mut self, + sender_sk: &StacksPrivateKey, + ) -> Result<String, String> { + let http_origin = format!("http://{}", &self.running_nodes.conf.node.rpc_bind); + let sender_addr = to_addr(&sender_sk); + let sender_nonce = get_account(&http_origin, &sender_addr).nonce; + let burn_height_contract = " + (define-data-var local-burn-block-ht uint u0) + (define-public (run-update) + (ok (var-set local-burn-block-ht burn-block-height))) + "; + let contract_tx = make_contract_publish( + &sender_sk, + 0, + 1000, + self.running_nodes.conf.burnchain.chain_id, + "burn-height-local", + burn_height_contract, + ); + let txid = submit_tx_fallible(&http_origin, &contract_tx)?; + + wait_for(120, || { + let next_nonce = get_account(&http_origin, &sender_addr).nonce; + Ok(next_nonce > sender_nonce) + }) + .map(|()| txid) + } + + /// Submit a burn 
block dependent contract-call + /// and wait until it is included in a block + pub fn submit_burn_block_call_and_wait( + &mut self, + sender_sk: &StacksPrivateKey, + ) -> Result<String, String> { + let http_origin = format!("http://{}", &self.running_nodes.conf.node.rpc_bind); + let sender_addr = to_addr(&sender_sk); + let sender_nonce = get_account(&http_origin, &sender_addr).nonce; + let contract_call_tx = make_contract_call( + &sender_sk, + sender_nonce, + 1000, + self.running_nodes.conf.burnchain.chain_id, + &sender_addr, + "burn-height-local", + "run-update", + &[], + ); + let txid = submit_tx_fallible(&http_origin, &contract_call_tx)?; + + wait_for(120, || { + let next_nonce = get_account(&http_origin, &sender_addr).nonce; + Ok(next_nonce > sender_nonce) + }) + .map(|()| txid) + } + + /// Get the local state machines and most recent peer info from the stacks-node, + /// waiting until all of the signers have updated their state machines to + /// reflect the most recent burn block. + pub fn get_burn_updated_states(&mut self) -> (Vec<LocalStateMachine>, PeerInfo) { + let info_cur = self.get_peer_info(); + let current_rc = self.get_current_reward_cycle(); + let mut states = Vec::with_capacity(0); + // fetch all the state machines *twice* + // we do this because the state machines return before the signer runloop + // invokes run_one_pass(), which is necessary to handle any pending updates to + // the state machine. + // we get around this by just doing this twice + for _i in 0..2 { + wait_for(120, || { + states = self.get_all_states(); + Ok(states.iter().enumerate().all(|(ix, signer_state)| { + let Some(Some(state_machine)) = signer_state + .signer_state_machines + .iter() + .find_map(|(rc, state)| { + if current_rc % 2 == *rc { + Some(state.as_ref()) + } else { + None + } + }) + else { + let rcs_set: Vec<_> = signer_state.signer_state_machines.iter().map(|(rc, state)| { + (rc, state.is_some()) + }).collect(); + warn!( + "Local state machine for signer #{ix} not set for reward cycle #{current_rc} yet"; + "burn_block_height" => info_cur.burn_block_height, + "rcs_set" => ?rcs_set + ); + return false; + }; + + let LocalStateMachine::Initialized(state_machine) = state_machine else { + warn!("Local state machine for signer #{ix} not initialized"); + return false; + }; + state_machine.burn_block_height >= info_cur.burn_block_height + })) + }) + .expect("Timed out while waiting to fetch local state machines from the signer set"); + } + + let state_machines = states + .into_iter() + .map(|signer_state| { + signer_state + .signer_state_machines + .into_iter() + .find_map(|(rc, state)| if current_rc % 2 == rc { Some(state) } else { None }) + .expect( + "BUG: should be able to find signer state machine at the current reward cycle", + ) + .expect("BUG: signer state machine should exist at the current reward cycle") + }) + .collect(); + + (state_machines, info_cur) + } + + /// Fetch the local signer state machine for all the signers, + /// waiting until every signer has processed the latest burn block. + /// Then, check that every signer's state machine corresponds to the + /// latest burn block: + /// 1. Not having a sortition! + /// 2. The active miner is the winner of the last sortition + /// 3. 
The active miner is building off of the prior tenure + pub fn check_signer_states_normal_missed_sortition(&mut self) { + let (state_machines, info_cur) = self.get_burn_updated_states(); + let non_sortition_latest = + get_sortition_info_ch(&self.running_nodes.conf, &info_cur.pox_consensus); + + assert!( + !non_sortition_latest.was_sortition, + "Most recent burn block should have no sortition", + ); + + let sortition_latest = get_sortition_info_ch( + &self.running_nodes.conf, + &non_sortition_latest.last_sortition_ch.as_ref().unwrap(), + ); + let sortition_prior = get_sortition_info_ch( + &self.running_nodes.conf, + sortition_latest.last_sortition_ch.as_ref().unwrap(), + ); + + info!("Latest non-sortition: {non_sortition_latest:?}"); + info!("Latest sortition: {sortition_latest:?}"); + info!("Prior sortition: {sortition_prior:?}"); + + assert_eq!( + sortition_latest.last_sortition_ch, + sortition_latest.stacks_parent_ch + ); + let latest_block = self + .stacks_client + .get_tenure_tip(&sortition_prior.consensus_hash) + .unwrap(); + let latest_block_id = + StacksBlockId::new(&sortition_prior.consensus_hash, &latest_block.block_hash()); + + state_machines + .into_iter() + .enumerate() + .for_each(|(ix, state_machine)| { + let LocalStateMachine::Initialized(state_machine) = state_machine else { + error!("Local state machine was not initialized"); + panic!(); + }; + + assert_eq!(state_machine.burn_block, info_cur.pox_consensus,); + assert_eq!(state_machine.burn_block_height, info_cur.burn_block_height,); + let MinerState::ActiveMiner { + current_miner_pkh, + parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + .. + } = state_machine.current_miner + else { + error!("State machine for Signer #{ix} did not have an active miner"); + panic!(); + }; + assert_eq!(Some(current_miner_pkh), sortition_latest.miner_pk_hash160); + assert_eq!(parent_tenure_id, sortition_prior.consensus_hash); + assert_eq!(parent_tenure_last_block, latest_block_id); + assert_eq!(parent_tenure_last_block_height, latest_block.height()); + }); + } + + /// Fetch the local signer state machine for all the signers, + /// waiting until every signer has processed the latest burn block. + /// Then, check that every signer's state machine corresponds to the + /// latest burn block: + /// 1. Having a valid sortition + /// 2. The active miner is the winner of that sortition + /// 3. 
The active miner is building off of the prior tenure + pub fn check_signer_states_reorg( + &mut self, + accepting_reorg: &[StacksPublicKey], + rejecting_reorg: &[StacksPublicKey], + ) { + let accepting_reorg: Vec<_> = accepting_reorg + .iter() + .map(|pk| { + self.signer_stacks_private_keys + .iter() + .position(|sk| &StacksPublicKey::from_private(&sk) == pk) + .unwrap() + }) + .collect(); + let rejecting_reorg: Vec<_> = rejecting_reorg + .iter() + .map(|pk| { + self.signer_stacks_private_keys + .iter() + .position(|sk| &StacksPublicKey::from_private(&sk) == pk) + .unwrap() + }) + .collect(); + + let (state_machines, info_cur) = self.get_burn_updated_states(); + + let sortition_latest = + get_sortition_info_ch(&self.running_nodes.conf, &info_cur.pox_consensus); + let sortition_parent = get_sortition_info_ch( + &self.running_nodes.conf, + sortition_latest.stacks_parent_ch.as_ref().unwrap(), + ); + let sortition_prior = get_sortition_info_ch( + &self.running_nodes.conf, + sortition_latest.last_sortition_ch.as_ref().unwrap(), + ); + assert!(sortition_latest.last_sortition_ch != sortition_latest.stacks_parent_ch); + let latest_block = self + .stacks_client + .get_tenure_tip(&sortition_parent.consensus_hash) + .unwrap(); + let latest_block_id = + StacksBlockId::new(&sortition_parent.consensus_hash, &latest_block.block_hash()); + + state_machines + .into_iter() + .enumerate() + .for_each(|(ix, state_machine)| { + let LocalStateMachine::Initialized(state_machine) = state_machine else { + error!("Local state machine was not initialized"); + panic!(); + }; + + info!("Signer #{ix} has state machine: {state_machine:?}"); + + assert_eq!(state_machine.burn_block, info_cur.pox_consensus,); + assert_eq!(state_machine.burn_block_height, info_cur.burn_block_height,); + let MinerState::ActiveMiner { current_miner_pkh, parent_tenure_id, parent_tenure_last_block, parent_tenure_last_block_height, .. 
} = + state_machine.current_miner + else { + error!("State machine for Signer #{ix} did not have an active miner"); + panic!(); + }; + if accepting_reorg.contains(&ix) { + assert_eq!(Some(current_miner_pkh), sortition_latest.miner_pk_hash160); + assert_eq!(parent_tenure_id, sortition_parent.consensus_hash); + assert_eq!(parent_tenure_last_block, latest_block_id); + assert_eq!(parent_tenure_last_block_height, latest_block.height()); + } else if rejecting_reorg.contains(&ix) { + assert_eq!(Some(current_miner_pkh), sortition_prior.miner_pk_hash160); + } else { + error!("Signer #{ix} was not supplied in either the approving or rejecting vectors"); + panic!(); + } + }); + } + + /// Get status check results (if returned) from each signer (blocks on the receipt) + /// Returns Some() or None() for each signer, in order of `self.spawned_signers` + pub fn get_all_states(&mut self) -> Vec<StateInfo> { + let mut finished_signers = HashSet::new(); + let mut output_states = Vec::new(); + let mut sent_request = false; + wait_for(120, || { + if !sent_request { + // clear any stale states + if self + .get_states(&finished_signers) + .iter() + .any(|s| s.is_some()) + { + info!("Had stale state responses, trying again to clear"); + return Ok(false); + } + self.send_status_request(&finished_signers); + sent_request = true; + thread::sleep(Duration::from_secs(1)); + } + + let latest_states = self.get_states(&finished_signers); + for (ix, state) in latest_states.into_iter().enumerate() { + let Some(state) = state else { + continue; + }; + + finished_signers.insert(ix); + output_states.push((ix, state)); + } + info!( + "Finished signers: {:?}", + finished_signers.iter().collect::<Vec<_>>() + ); + Ok(finished_signers.len() == self.spawned_signers.len()) + }) + .expect("Timed out waiting for state responses from signer set"); + + output_states.sort_by_key(|(ix, _state)| *ix); + output_states + .into_iter() + .map(|(_ix, state)| state) + .collect() + } + + /// Replace the test's configured signer set + pub fn replace_signers( + &mut self, + new_signers: Vec<SpawnedSigner<S, T>>, + new_signers_sks: Vec<StacksPrivateKey>, + new_signer_configs: Vec<SignerConfig>, + ) -> ( + Vec<SpawnedSigner<S, T>>, + Vec<StacksPrivateKey>, + Vec<SignerConfig>, + ) { + let old_signers = std::mem::replace(&mut self.spawned_signers, new_signers); + let old_signers_sks = + std::mem::replace(&mut self.signer_stacks_private_keys, new_signers_sks); + let old_signers_confs = std::mem::replace(&mut self.signer_configs, new_signer_configs); + (old_signers, old_signers_sks, old_signers_confs) + } + /// Get status check results (if returned) from each signer without blocking /// Returns Some() or None() for each signer, in order of `self.spawned_signers` pub fn get_states(&mut self, exclude: &HashSet<usize>) -> Vec<Option<StateInfo>> { @@ -302,17 +799,14 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>>() } + /// Get the signer public keys by directly computing them from this signer test's + /// signer private keys. 
+ pub fn signer_test_pks(&self) -> Vec<StacksPublicKey> { + self.signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect() + } + /// Get the signer public keys for the given reward cycle fn get_signer_public_keys(&self, reward_cycle: u64) -> Vec<StacksPublicKey> { let entries = self.get_reward_set_signers(reward_cycle); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3485e53c5d8..aa27fdaf6e5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -24,24 +24,31 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, PeerInfo, RejectCode, - RejectReason, SignerMessage, + RejectReason, SignerMessage, StateMachineUpdateContent, StateMachineUpdateMinerState, }; use libsigner::{ BlockProposal, BlockProposalData, SignerSession, StackerDBSession, VERSION_STRING, }; +use rand::{thread_rng, Rng}; +use rusqlite::Connection; use stacks::address::AddressHashMode; use stacks::burnchains::Txid; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::LeaderBlockCommitOp; +use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::miner::{TransactionEvent, TransactionSuccessEvent}; use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig}; +use stacks::core::mempool::MemPoolWalkStrategy; +use stacks::core::test_util::{ + insert_tx_in_mempool, make_contract_call, make_contract_publish, make_stacks_transfer, +}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; @@ -68,6 +75,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::signerdb::SignerDb; +use stacks_signer::v0::signer::TEST_REPEAT_PROPOSAL_RESPONSE; use stacks_signer::v0::tests::{ TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, TEST_SKIP_SIGNER_CLEANUP, TEST_STALL_BLOCK_VALIDATION_SUBMISSION, @@ -93,13 +101,10 @@ use crate::tests::nakamoto_integrations::{ POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ - get_account, get_chain_info, get_chain_info_opt, get_pox_info, get_sortition_info, - get_sortition_info_ch, next_block_and_wait, run_until_burnchain_height, submit_tx, - submit_tx_fallible, test_observer, -}; -use crate::tests::{ - self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer, + get_account, get_chain_info, get_chain_info_opt, get_sortition_info, get_sortition_info_ch, + next_block_and_wait, run_until_burnchain_height, submit_tx, 
submit_tx_fallible, test_observer, }; +use crate::tests::{self, gen_random_port}; use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest<SpawnedSigner> { @@ -155,7 +160,7 @@ impl SignerTest<SpawnedSigner> { .to_rsv(); let signer_pk = StacksPublicKey::from_private(stacker_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -235,7 +240,7 @@ impl SignerTest<SpawnedSigner> { &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.counters.blocks_processed, ); - self.wait_for_registered(30); + self.wait_for_registered(); debug!("Signers initialized"); let current_burn_block_height = self @@ -278,7 +283,7 @@ impl SignerTest<SpawnedSigner> { &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.counters.blocks_processed, ); - self.wait_for_registered(30); + self.wait_for_registered(); info!("Signers initialized"); self.run_until_epoch_3_boundary(); @@ -312,6 +317,7 @@ impl SignerTest<SpawnedSigner> { let reward_cycle = self.get_current_reward_cycle(); self.mine_nakamoto_block(timeout, use_nakamoto_blocks_mined); + self.check_signer_states_normal(); // Verify that the signers accepted the proposed block, sending back a validate ok response let proposed_signer_signature_hash = self @@ -458,7 +464,6 @@ pub struct MultipleMinerTest { signer_test: SignerTest<SpawnedSigner>, sender_sk: Secp256k1PrivateKey, - sender_nonce: u64, send_amt: u64, send_fee: u64, conf_node_2: NeonConfig, @@ -602,7 +607,6 @@ impl MultipleMinerTest { MultipleMinerTest { signer_test, sender_sk, - sender_nonce: 0, send_amt, send_fee, conf_node_2, @@ -734,35 +738,109 @@ impl MultipleMinerTest { ) } - /// Sends a transfer tx to the stacks node and returns the txid - pub fn send_transfer_tx(&mut self) -> String { - let http_origin = format!( + /// Sends a transfer tx to the stacks node and returns the txid and nonce used + pub fn send_transfer_tx(&mut self) -> (String, u64) { + self.signer_test + .submit_transfer_tx(&self.sender_sk, self.send_fee, self.send_amt) + .unwrap() + } + + fn node_http(&self) -> String { + format!( "http://{}", &self.signer_test.running_nodes.conf.node.rpc_bind - ); - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( + ) + } + + /// Sends a transfer tx to the stacks node and waits for the stacks node to mine it /// Returns the txid of the transfer tx. 
+ pub fn send_and_mine_transfer_tx(&mut self, timeout_secs: u64) -> Result<String, String> { + let (txid, nonce) = self.send_transfer_tx(); + let http_origin = self.node_http(); + let sender_addr = tests::to_addr(&self.sender_sk); + wait_for(timeout_secs, || { + Ok(get_account(&http_origin, &sender_addr).nonce > nonce) + })?; + Ok(txid) + } + + pub fn send_contract_publish( + &mut self, + sender_nonce: u64, + contract_name: &str, + contract_src: &str, + ) -> String { + let http_origin = self.node_http(); + let contract_tx = make_contract_publish( + &self.sender_sk, - self.sender_nonce, - self.send_fee, + sender_nonce, + self.send_fee + contract_name.len() as u64 + contract_src.len() as u64, self.signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - self.send_amt, + contract_name, + contract_src, ); - self.sender_nonce += 1; - submit_tx(&http_origin, &transfer_tx) + submit_tx(&http_origin, &contract_tx) } - /// Sends a transfer tx to the stacks node and waits for the stacks node to mine it + /// Sends a contract publish tx to the stacks node and waits for the stacks node to mine it /// Returns the txid of the contract publish tx. - pub fn send_and_mine_transfer_tx(&mut self, timeout_secs: u64) -> Result<String, String> { + pub fn send_and_mine_contract_publish( + &mut self, + sender_nonce: u64, + contract_name: &str, + contract_src: &str, + timeout_secs: u64, + ) -> Result<String, String> { let stacks_height_before = self.get_peer_stacks_tip_height(); - let txid = self.send_transfer_tx(); + + let txid = self.send_contract_publish(sender_nonce, contract_name, contract_src); + + // wait for the new block to be mined wait_for(timeout_secs, || { Ok(self.get_peer_stacks_tip_height() > stacks_height_before) - })?; - Ok(txid) + }) + .unwrap(); + + // wait for the observer to see it + self.wait_for_test_observer_blocks(timeout_secs); + + if last_block_contains_txid(&txid) { + Ok(txid) + } else { + Err(txid) + } + } + + pub fn send_contract_call( + &mut self, + sender_nonce: u64, + contract_name: &str, + function_name: &str, + function_args: &[clarity::vm::Value], + ) -> String { + let http_origin = self.node_http(); + // build a fake tx for getting a rough amount of fee + let fake_contract_tx = make_contract_call( + &self.sender_sk, + sender_nonce, + 100, + self.signer_test.running_nodes.conf.burnchain.chain_id, + &tests::to_addr(&self.sender_sk), + contract_name, + function_name, + function_args, + ); + let contract_tx = make_contract_call( + &self.sender_sk, + sender_nonce, + fake_contract_tx.len() as u64, + self.signer_test.running_nodes.conf.burnchain.chain_id, + &tests::to_addr(&self.sender_sk), + contract_name, + function_name, + function_args, + ); + submit_tx(&http_origin, &contract_tx) } /// Return the Peer Info from node 1 @@ -822,6 +900,20 @@ impl MultipleMinerTest { self.rl2_counters.naka_skip_commit_op.set(true); } + /// Pause miner 1's commits + pub fn pause_commits_miner_1(&mut self) { + self.signer_test + .running_nodes + .counters + .naka_skip_commit_op + .set(true); + } + + /// Pause miner 2's commits + pub fn pause_commits_miner_2(&mut self) { + self.rl2_counters.naka_skip_commit_op.set(true); + } + /// Ensures that miner 1 submits a commit pointing to the current view reported by the stacks node as expected pub fn submit_commit_miner_1(&mut self, sortdb: &SortitionDB) { if !self @@ -897,6 +989,20 @@ impl MultipleMinerTest { self.signer_test.shutdown(); } + pub fn wait_for_test_observer_blocks(&self, timeout_secs: u64) { + let block_header_hash_tip = format!("0x{}", self.get_peer_stacks_tip().to_hex()); + 
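// Scan the observer's blocks newest-first; once the node's current Stacks tip
// hash appears, the event observer has caught up with the chain.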
wait_for(timeout_secs, || { + for block in test_observer::get_blocks().iter().rev() { + if block["block_hash"].as_str().unwrap() == block_header_hash_tip { + return Ok(true); + } + } + Ok(false) + }) + .expect("Timed out waiting for test_observer blocks"); + } + /// Wait for both miners to have the same stacks tip height pub fn wait_for_chains(&self, timeout_secs: u64) { wait_for(timeout_secs, || { @@ -934,6 +1040,22 @@ fn last_block_contains_tenure_change_tx(cause: TenureChangeCause) -> bool { } } +/// Check if a txid exists in the last block +fn last_block_contains_txid(txid: &str) -> bool { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + for tx in transactions { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if parsed.txid().to_string() == txid { + return true; + } + } + false +} + /// Asserts that the last block in the test observer contains a tenure change with the given cause. fn verify_last_block_contains_tenure_change_tx(cause: TenureChangeCause) { assert!(last_block_contains_tenure_change_tx(cause)); @@ -1253,6 +1375,65 @@ pub fn wait_for_block_rejections_from_signers( Ok(result) } +/// Waits for all of the provided signers to send an update for a block with the specified burn block height and parent tenure stacks block height +pub fn wait_for_state_machine_update( + timeout_secs: u64, + expected_burn_block: &ConsensusHash, + expected_burn_block_height: u64, + expected_miner_info: Option<(Hash160, u64)>, +) -> Result<(), String> { + wait_for(timeout_secs, || { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + for chunk in stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + let SignerMessage::StateMachineUpdate(update) = message else { + continue; + }; + let StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + } = &update.content; + if *burn_block_height != expected_burn_block_height || burn_block != expected_burn_block + { + continue; + } + match current_miner { + StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh, + parent_tenure_last_block_height, + .. 
+ } => { + if let Some(( + expected_miner_pkh, + expected_miner_parent_tenure_last_block_height, + )) = expected_miner_info + { + if expected_miner_pkh != *current_miner_pkh + || expected_miner_parent_tenure_last_block_height + != *parent_tenure_last_block_height + { + continue; + } + } + } + StateMachineUpdateMinerState::NoValidMiner => { + if expected_miner_info.is_some() { + continue; + }; + } + } + // We only need one update to match our conditions + return Ok(true); + } + Ok(false) + }) +} + #[test] #[ignore] /// Test that a signer can respond to an invalid block proposal @@ -1405,6 +1586,7 @@ fn miner_gather_signatures() { info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); + signer_test.check_signer_states_normal(); // Test prometheus metrics response #[cfg(feature = "monitoring_prom")] @@ -1829,7 +2011,7 @@ fn reloads_signer_set_in() { &mut signer_test.running_nodes.btc_regtest_controller, &signer_test.running_nodes.counters.blocks_processed, ); - signer_test.wait_for_registered(30); + signer_test.wait_for_registered(); info!("Signers initialized"); signer_test.run_until_epoch_3_boundary(); @@ -1958,6 +2140,8 @@ fn forked_tenure_testing( ) .unwrap(); + signer_test.check_signer_states_normal(); + sleep_ms(1000); let tip_a = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -1982,6 +2166,8 @@ fn forked_tenure_testing( ) .unwrap(); + signer_test.check_signer_states_normal(); + info!("Commit op is submitted; unpause tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits. @@ -2113,6 +2299,13 @@ fn forked_tenure_testing( panic!(); }); + let signer_pks = signer_test.signer_test_pks(); + if expect_tenure_c { + signer_test.check_signer_states_reorg(&signer_pks, &[]); + } else { + signer_test.check_signer_states_reorg(&[], &signer_pks); + }; + // allow blocks B and C to be processed sleep_ms(1000); @@ -2180,6 +2373,12 @@ fn forked_tenure_testing( // Mine tenure D signer_test.mine_nakamoto_block(Duration::from_secs(60), false); + if expect_tenure_c { + signer_test.check_signer_states_normal(); + } else { + signer_test.check_signer_states_reorg(&signer_pks, &[]); + } + let tip_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); @@ -2216,6 +2415,7 @@ fn bitcoind_forking_test() { vec![(sender_addr, send_amt + send_fee)], |_| {}, |node_config| { + node_config.miner.block_commit_delay = Duration::from_secs(1); let epochs = node_config.burnchain.epochs.as_mut().unwrap(); epochs[StacksEpochId::Epoch30].end_height = 3_015; epochs[StacksEpochId::Epoch31].start_height = 3_015; @@ -2264,6 +2464,7 @@ fn bitcoind_forking_test() { for i in 0..pre_fork_tenures { info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); } let pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2286,6 +2487,9 @@ fn bitcoind_forking_test() { .btc_regtest_controller .build_next_block(1); + // note, we should still have normal signer states! 
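// (replacing a single burn block via the shallow fork does not change which
// miner won the most recently processed sortition, so the signers' view of
// the active miner is expected to be unchanged here)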
+ signer_test.check_signer_states_normal(); + info!("Wait for block off of shallow fork"); TEST_MINE_STALL.set(true); @@ -2330,6 +2534,7 @@ fn bitcoind_forking_test() { }, ) .unwrap(); + signer_test.check_signer_states_normal_missed_sortition(); } let post_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2341,6 +2546,11 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + if i == 0 { + signer_test.check_signer_states_reorg(&signer_test.signer_test_pks(), &[]); + } else { + signer_test.check_signer_states_normal(); + } } let pre_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2367,6 +2577,7 @@ fn bitcoind_forking_test() { .btc_regtest_controller .build_next_block(4); + signer_test.check_signer_states_normal(); info!("Wait for block off of deep fork"); let commits_submitted = signer_test @@ -2409,6 +2620,7 @@ fn bitcoind_forking_test() { }, ) .unwrap(); + signer_test.check_signer_states_normal_missed_sortition(); } let post_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2420,6 +2632,11 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + if i == 0 { + signer_test.check_signer_states_reorg(&signer_test.signer_test_pks(), &[]); + } else { + signer_test.check_signer_states_normal(); + } } let test_end_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2485,6 +2702,8 @@ fn multiple_miners() { Duration::from_secs(30), ); + miners.signer_test.check_signer_states_normal(); + btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf_1); // for this test, there should be one block per tenure @@ -2650,6 +2869,7 @@ fn miner_forking() { .expect("Failed to mine BTC block."); miners.wait_for_chains(120); + miners.signer_test.check_signer_states_normal(); // make sure the tenure was won by RL1 verify_sortition_winner(&sortdb, &mining_pkh_1); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -2696,6 +2916,9 @@ fn miner_forking() { miners .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::Extended, 60) .expect("Failed to mine BTC block followed by tenure change tx."); + miners + .signer_test + .check_signer_states_reorg(&[], &miners.signer_test.signer_test_pks()); miners.wait_for_chains(120); // fetch the current sortition info let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -2738,6 +2961,9 @@ fn miner_forking() { miners .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 60) .expect("Failed to mine BTC block."); + miners + .signer_test + .check_signer_states_reorg(&miners.signer_test.signer_test_pks(), &[]); miners.submit_commit_miner_1(&sortdb); // unblock block mining let blocks_len = test_observer::get_blocks().len(); @@ -2754,6 +2980,7 @@ fn miner_forking() { miners .mine_bitcoin_blocks_and_confirm_with_test_observer(&sortdb, 1, 60) .expect("Failed to mine BTC block."); + miners.signer_test.check_signer_states_normal(); // fetch the current sortition info miners.wait_for_chains(120); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -2872,10 +3099,9 @@ fn end_of_tenure() { TEST_VALIDATE_STALL.set(true); let proposals_before = proposed_blocks.load(Ordering::SeqCst); - let blocks_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + let info = signer_test.get_peer_info(); + let blocks_before 
= info.stacks_tip_height; - let info = get_chain_info(&signer_test.running_nodes.conf); - let start_height = info.stacks_tip_height; // submit a tx so that the miner will mine an extra block let sender_nonce = 0; let transfer_tx = make_stacks_transfer( @@ -2941,7 +3167,7 @@ fn end_of_tenure() { .expect("Timed out waiting for block to be mined"); let info = get_chain_info(&signer_test.running_nodes.conf); - assert_eq!(info.stacks_tip_height, start_height + 1); + assert_eq!(info.stacks_tip_height, blocks_before + 1); signer_test.shutdown(); } @@ -3038,12 +3264,13 @@ fn retry_on_rejection() { submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal"); - loop { + wait_for(60, || { if proposed_blocks.load(Ordering::SeqCst) > proposals_before { - break; + return Ok(true); } - std::thread::sleep(Duration::from_millis(100)); - } + Ok(false) + }) + .expect("Timed out waiting for block proposal"); info!("Block proposed, verifying that it is not processed"); // Wait 10 seconds to be sure that the timeout has occurred @@ -3053,12 +3280,15 @@ fn retry_on_rejection() { // resume signing info!("Disable unconditional rejection and wait for the block to be processed"); TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); - loop { + + wait_for(60, || { if mined_blocks.load(Ordering::SeqCst) > blocks_before { - break; + return Ok(true); } - std::thread::sleep(Duration::from_millis(100)); - } + Ok(false) + }) + .expect("Timed out waiting for block to be mined"); + signer_test.shutdown(); } @@ -3096,6 +3326,7 @@ fn signers_broadcast_signed_blocks() { .clone(); let blocks_before = mined_blocks.load(Ordering::SeqCst); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); wait_for(30, || { let blocks_mined = mined_blocks.load(Ordering::SeqCst); @@ -3180,6 +3411,7 @@ fn tenure_extend_after_idle_signers() { info!("---- Nakamoto booted, starting test ----"); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); info!("---- Waiting for a tenure extend ----"); @@ -3236,6 +3468,7 @@ fn tenure_extend_with_other_transactions() { info!("---- Nakamoto booted, starting test ----"); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); info!("Pause miner so it doesn't propose a block before the tenure extend"); TEST_MINE_STALL.set(true); @@ -3341,6 +3574,7 @@ fn tenure_extend_after_idle_miner() { info!("---- Nakamoto booted, starting test ----"); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); info!("---- Start a new tenure but ignore block signatures so no timestamps are recorded ----"); let tip_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; @@ -3417,6 +3651,7 @@ fn tenure_extend_succeeds_after_rejected_attempt() { info!("---- Nakamoto booted, starting test ----"); let stacks_tip_height = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); info!("---- Waiting for a rejected tenure extend ----"); // Now, wait for a block with a tenure extend proposal from the miner, but ensure it is rejected. 
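The retry_on_rejection hunks above swap open-ended `loop { ... sleep(100ms) }` busy-waits for the bounded `wait_for` helper, so a condition that never becomes true fails the test with a timeout instead of hanging CI. A minimal sketch of that polling pattern, assuming the closure shape used throughout these tests (a hypothetical standalone version, not the repo's actual helper):

use std::time::{Duration, Instant};

/// Poll `check` roughly every 100ms until it returns Ok(true), propagating
/// any error from the closure and failing once `timeout_secs` have elapsed.
fn wait_for<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
where
    F: FnMut() -> Result<bool, String>,
{
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if check()? {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(100));
    }
    Err(format!("Timed out after {timeout_secs}s waiting for condition"))
}

Usage mirrors the hunk above: `wait_for(60, || Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before)).expect("Timed out waiting for block to be mined")`.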
@@ -3601,7 +3836,7 @@ fn idle_tenure_extend_active_mining() { let amount = deploy_fee + tx_fee * num_txs * tenure_count * num_naka_blocks * 100 + 100 * tenure_count; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let idle_timeout = Duration::from_secs(60); + let idle_timeout = Duration::from_secs(30); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, amount), (deployer_addr, amount)], @@ -3788,7 +4023,7 @@ fn idle_tenure_extend_active_mining() { ); // Now, wait for the idle timeout to trigger - wait_for(extend_diff + 30, || { + wait_for(idle_timeout.as_secs() * 2, || { Ok(last_block_contains_tenure_change_tx( TenureChangeCause::Extended, )) @@ -3832,11 +4067,9 @@ fn idle_tenure_extend_active_mining() { #[test] #[ignore] /// This test checks the behaviour of signers when a sortition is empty. Specifically: -/// - An empty sortition will cause the signers to mark a miner as misbehaving once a timeout is exceeded. +/// - An empty tenure will cause the signers to mark a miner as misbehaving once a timeout is exceeded. /// - The miner will stop trying to mine once it sees a threshold of signers reject the block -/// - The empty sortition will trigger the miner to attempt a tenure extend. -/// - Signers will accept the tenure extend and sign subsequent blocks built off the old sortition -fn empty_sortition() { +fn empty_tenure_delayed() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -3861,7 +4094,9 @@ fn empty_sortition() { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; }, - |_| {}, + |node_config| { + node_config.miner.block_commit_delay = Duration::from_secs(2); + }, None, None, ); @@ -3875,16 +4110,13 @@ fn empty_sortition() { let Counters { naka_mined_blocks: mined_blocks, naka_submitted_commits: submitted_commits, - naka_skip_commit_op: skip_commit_op, naka_rejected_blocks: rejected_blocks, .. 
} = signer_test.running_nodes.counters.clone(); - TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk]); - info!("------------------------- Test Mine Regular Tenure A -------------------------"); let commits_before = submitted_commits.load(Ordering::SeqCst); - // Mine a regular tenure + // Mine a regular tenure, but wait for commits to be submitted next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -3894,11 +4126,14 @@ fn empty_sortition() { }, ) .unwrap(); + signer_test.check_signer_states_normal(); info!("------------------------- Test Mine Empty Tenure B -------------------------"); - info!("Pausing stacks block mining to trigger an empty sortition."); let blocks_before = mined_blocks.load(Ordering::SeqCst); let commits_before = submitted_commits.load(Ordering::SeqCst); + info!("Pausing stacks block proposal to force an empty tenure"); + TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk]); + // Start new Tenure B // In the next block, the miner should win the tenure next_block_and( @@ -3910,12 +4145,7 @@ fn empty_sortition() { }, ) .unwrap(); - - info!("Pausing stacks block proposal to force an empty tenure"); - TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk]); - - info!("Pausing commit op to prevent tenure C from starting..."); - skip_commit_op.set(true); + signer_test.check_signer_states_normal(); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before); @@ -3936,6 +4166,8 @@ fn empty_sortition() { std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); + signer_test.check_signer_states_revert_to_prior(); + TEST_BROADCAST_PROPOSAL_STALL.set(vec![]); info!("------------------------- Test Delayed Block is Rejected -------------------------"); @@ -4076,6 +4308,7 @@ fn empty_sortition_before_approval() { || Ok(proposed_blocks.load(Ordering::SeqCst) > proposed_before), ) .expect("Failed to mine tenure A and propose a block"); + signer_test.check_signer_states_normal(); info!("------------------------- Test Mine Empty Tenure B -------------------------"); @@ -4089,6 +4322,7 @@ fn empty_sortition_before_approval() { }, ) .expect("Failed to mine empty tenure"); + signer_test.check_signer_states_normal_missed_sortition(); info!("Unpause block commits"); skip_commit_op.set(false); @@ -4220,8 +4454,7 @@ fn empty_sortition_before_proposal() { }) .expect("Failed to advance chain tip"); - // Sleep a bit more to ensure the signers see both burn blocks - sleep_ms(5_000); + signer_test.check_signer_states_normal_missed_sortition(); info!("Unpause miner"); TEST_MINE_STALL.set(false); @@ -4296,6 +4529,7 @@ fn empty_sortition_before_proposal() { &signer_test.running_nodes.coord_channel, ) .expect("Failed to mine a normal tenure after the tenure extend"); + signer_test.check_signer_states_normal(); info!("------------------------- Shutdown -------------------------"); @@ -4594,12 +4828,16 @@ fn signer_set_rollover() { None, ); + let new_signer_configs: Vec<_> = new_signer_configs + .iter() + .map(|conf_str| SignerConfig::load_from_str(conf_str).unwrap()) + .collect(); + let new_spawned_signers: Vec<_> = new_signer_configs .iter() - .map(|conf| { + .map(|signer_config| { info!("spawning signer"); - let signer_config = SignerConfig::load_from_str(conf).unwrap(); - SpawnedSigner::new(signer_config) + SpawnedSigner::new(signer_config.clone()) }) .collect(); @@ -4609,8 +4847,7 @@ fn signer_set_rollover() { initial_balances, |_| {}, |naka_conf| { - for toml in new_signer_configs.clone() { - let signer_config = 
SignerConfig::load_from_str(&toml).unwrap(); + for signer_config in new_signer_configs.clone() { info!( "---- Adding signer endpoint to naka conf ({}) ----", signer_config.endpoint @@ -4643,9 +4880,8 @@ fn signer_set_rollover() { let short_timeout = Duration::from_secs(20); // Verify that naka_conf has our new signer's event observers - for toml in &new_signer_configs { - let signer_config = SignerConfig::load_from_str(toml).unwrap(); - let endpoint = format!("{}", signer_config.endpoint); + for signer_config in &new_signer_configs { + let endpoint = signer_config.endpoint.to_string(); assert!(signer_test .running_nodes .conf @@ -4689,6 +4925,7 @@ fn signer_set_rollover() { ); submit_tx(&http_origin, &transfer_tx); signer_test.mine_nakamoto_block(short_timeout, true); + signer_test.check_signer_states_normal(); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); let block_sighash = mined_block.signer_signature_hash; let signer_signatures = mined_block.signer_signature; @@ -4732,7 +4969,7 @@ fn signer_set_rollover() { .to_rsv(); let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -4763,6 +5000,7 @@ fn signer_set_rollover() { .expect("Timed out waiting for stacking txs to be mined"); signer_test.mine_nakamoto_block(short_timeout, true); + signer_test.check_signer_states_normal(); let next_reward_cycle = reward_cycle.saturating_add(1); @@ -4787,7 +5025,20 @@ fn signer_set_rollover() { assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); } - info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",); + info!("---- Mining to just before the next reward cycle (block {next_cycle_height}) -----",); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height.saturating_sub(1), + new_num_signers, + ); + + let (old_spawned_signers, _, _) = signer_test.replace_signers( + new_spawned_signers, + new_signer_private_keys, + new_signer_configs, + ); + + info!("---- Mining into the next reward cycle (block {next_cycle_height}) -----",); signer_test.run_until_burnchain_height_nakamoto( Duration::from_secs(60), next_cycle_height, @@ -4816,6 +5067,7 @@ fn signer_set_rollover() { ); submit_tx(&http_origin, &transfer_tx); signer_test.mine_nakamoto_block(short_timeout, true); + signer_test.check_signer_states_normal(); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); info!("---- Verifying that the new signers signed the block -----"); @@ -4830,7 +5082,7 @@ fn signer_set_rollover() { } signer_test.shutdown(); - for signer in new_spawned_signers { + for signer in old_spawned_signers { assert!(signer.stop().is_none()); } } @@ -5093,6 +5345,7 @@ fn multiple_miners_with_nakamoto_blocks() { &[&rl1_counters, &rl2_counters], Duration::from_secs(30), ); + miners.signer_test.check_signer_states_normal(); btc_blocks_mined += 1; // wait for the new block to be processed @@ -5187,7 +5440,6 @@ fn partial_tenure_fork() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -5246,7 +5498,6 @@ fn partial_tenure_fork() { Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); - let blocks_mined1 = signer_test.running_nodes.counters.naka_mined_blocks.clone(); let conf = 
signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); @@ -5285,9 +5536,6 @@ fn partial_tenure_fork() { let rl2_coord_channels = run_loop_2.coordinator_channels(); let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let Counters { - naka_mined_blocks: blocks_mined2, - naka_proposed_blocks: blocks_proposed2, - naka_submitted_commits: commits_2, naka_skip_commit_op: rl2_skip_commit_op, .. } = run_loop_2.counters(); @@ -5295,13 +5543,16 @@ fn partial_tenure_fork() { let rl1_counters = signer_test.running_nodes.counters.clone(); signer_test.boot_to_epoch_3(); + + // Pause block commits from miner 2 to make sure + // miner 1 wins the first block + rl2_skip_commit_op.set(true); + let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); - let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; - wait_for(200, || { let Some(node_1_info) = get_chain_info_opt(&conf) else { return Ok(false); @@ -5315,29 +5566,6 @@ fn partial_tenure_fork() { info!("------------------------- Reached Epoch 3.0 -------------------------"); - // due to the random nature of mining sortitions, the way this test is structured - // is that we keep track of how many tenures each miner produced, and once enough sortitions - // have been produced such that each miner has produced 3 tenures, we stop and check the - // results at the end - let mut btc_blocks_mined = 0; - let mut miner_1_tenures = 0u64; - let mut miner_2_tenures = 0u64; - let mut fork_initiated = false; - let mut min_miner_1_tenures = u64::MAX; - let mut min_miner_2_tenures = u64::MAX; - let mut ignore_block = 0; - - let mut miner_1_blocks = 0; - let mut miner_2_blocks = 0; - let mut min_miner_2_blocks = 0; - let mut last_sortition_winner: Option = None; - let mut miner_2_won_2_in_a_row = false; - - let commits_1 = signer_test - .running_nodes - .counters - .naka_submitted_commits - .clone(); let rl1_skip_commit_op = signer_test .running_nodes .counters @@ -5364,278 +5592,159 @@ fn partial_tenure_fork() { info!("-------- Miner 2 caught up to miner 1 --------"); - // Pause block commits - rl1_skip_commit_op.set(true); - rl2_skip_commit_op.set(true); - let info_before = get_chain_info(&conf); + info!("-------- Miner 1 starting next tenure --------"); + + wait_for(60, || { + Ok(rl1_counters.naka_submitted_commit_last_burn_height.get() + >= info_before.burn_block_height) + }) + .unwrap(); + info!("-------- Blocking Miner 1 so that Miner 2 will win the next next tenure --------"); + rl1_skip_commit_op.set(true); + // Mine the first block - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 180, - || { - let info_1 = get_chain_info(&conf); - Ok(info_1.stacks_tip_height > info_before.stacks_tip_height) - }, - ) - .expect("Timed out waiting for new Stacks block to be mined"); + signer_test.mine_bitcoin_block(); + signer_test.check_signer_states_normal(); - info!("-------- Mined first block, wait for block commits --------"); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_1)); - let info_before = get_chain_info(&conf); + // Setup miner 2 to ignore a block in this tenure + let ignore_block = info_before.stacks_tip_height + 3; + set_ignore_block(ignore_block, &conf_node_2.node.working_dir); - // Unpause block commits and wait for both miners' commits - rl1_skip_commit_op.set(false); + // mine the interim blocks + for interim_block_ix in 
0..inter_blocks_per_tenure { + info!( + "Mining interim block #{interim_block_ix} in Miner 1's first tenure (the to-be-forked tenure)"; + ); + + let (_, sender_nonce) = signer_test + .submit_transfer_tx(&sender_sk, send_fee, send_amt) + .unwrap(); + + wait_for(60, || { + Ok(get_account(&http_origin, &sender_addr).nonce > sender_nonce) + }) + .unwrap(); + } + + info!("------- Unblocking Miner 2 ------"); rl2_skip_commit_op.set(false); + wait_for(60, || { + Ok(rl2_counters.naka_submitted_commit_last_burn_height.get() + > info_before.burn_block_height + && rl2_counters.naka_submitted_commit_last_stacks_tip.get() + > info_before.stacks_tip_height) + }) + .unwrap(); + let proposals_before = rl2_counters.naka_proposed_blocks.get(); + let rejections_before = rl2_counters.naka_rejected_blocks.get(); + let peer_info_before = signer_test.get_peer_info(); + info!("------- Miner 2 wins first tenure post-fork ------"); + signer_test.mine_bitcoin_block(); + // Miner 2's tenure is "normal", even though it will end up being rejected by signers because miner 2 + // is trying to reorg Miner 1's tenure + signer_test.check_signer_states_normal(); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_2)); - // Ensure that both miners' commits point at the stacks tip wait_for(60, || { - let last_committed_1 = rl1_counters - .naka_submitted_commit_last_stacks_tip - .load(Ordering::SeqCst); - let last_committed_2 = rl2_counters - .naka_submitted_commit_last_stacks_tip - .load(Ordering::SeqCst); - Ok(last_committed_1 >= info_before.stacks_tip_height - && last_committed_2 >= info_before.stacks_tip_height) + Ok(rl2_counters.naka_proposed_blocks.get() > proposals_before + && rl2_counters.naka_rejected_blocks.get() > rejections_before) }) - .expect("Timed out waiting for block commits"); + .expect("Miner 2 should propose blocks that get rejected"); - while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { - if btc_blocks_mined >= max_nakamoto_tenures { - panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); - } + let peer_info = signer_test.get_peer_info(); + assert_eq!( + peer_info.stacks_tip_height, + peer_info_before.stacks_tip_height + ); + wait_for(60, || { + Ok( + rl2_counters.naka_submitted_commit_last_burn_height.get() + >= peer_info.burn_block_height, + ) + }) + .unwrap(); - // Mine a block and wait for it to be processed, unless we are in a - // forked tenure, in which case, just wait for the block proposal - let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); - let proposed_before_1 = signer_test - .running_nodes - .counters - .naka_proposed_blocks - .load(Ordering::SeqCst); + info!("------- Miner 2 wins second tenure post-fork ------"); + rl2_skip_commit_op.set(true); + signer_test.mine_bitcoin_block(); + info!("------- Unblocking Miner 1 so they can win the next tenure ------"); + rl1_skip_commit_op.set(false); - info!( - "Next tenure checking"; - "fork_initiated?" 
=> fork_initiated,
-            "miner_1_tenures" => miner_1_tenures,
-            "miner_2_tenures" => miner_2_tenures,
-            "min_miner_1_tenures" => min_miner_2_tenures,
-            "min_miner_2_tenures" => min_miner_2_tenures,
-            "proposed_before_1" => proposed_before_1,
-            "proposed_before_2" => proposed_before_2,
-            "mined_before_1" => mined_before_1,
-            "mined_before_2" => mined_before_2,
-        );
+    // Miner 2's tenure is an allowed reorg because the prior tenure had no blocks
+    signer_test.check_signer_states_reorg(&signer_test.signer_test_pks(), &[]);
+    let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_2));
-        // Pause block commits
-        rl1_skip_commit_op.set(true);
-        rl2_skip_commit_op.set(true);
+    let peer_info = signer_test.get_peer_info();
+    assert_eq!(
+        peer_info.stacks_tip_height,
+        peer_info_before.stacks_tip_height
+    );
+    wait_for(60, || {
+        Ok(
+            rl1_counters.naka_submitted_commit_last_burn_height.get()
+                >= peer_info.burn_block_height,
+        )
+    })
+    .unwrap();
-        let tip_before = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
-        let commits_before_1 = commits_1.load(Ordering::SeqCst);
-        let commits_before_2 = commits_2.load(Ordering::SeqCst);
+    rl1_skip_commit_op.set(true);
+    info!("------- Miner 1 wins the third tenure post-fork ------");
+    signer_test.mine_bitcoin_block();
+    info!("------- Unblocking Miner 2 so they can win the next tenure ------");
+    rl2_skip_commit_op.set(false);
+    signer_test.check_signer_states_reorg(&signer_test.signer_test_pks(), &[]);
+    let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_1));
-        next_block_and(
-            &mut signer_test.running_nodes.btc_regtest_controller,
-            60,
-            || {
-                let mined_1 = blocks_mined1.load(Ordering::SeqCst);
-                let mined_2 = blocks_mined2.load(Ordering::SeqCst);
-                let proposed_2 = blocks_proposed2.load(Ordering::SeqCst);
-
-                Ok((fork_initiated && proposed_2 > proposed_before_2)
-                    || mined_1 > mined_before_1
-                    || mined_2 > mined_before_2)
-            },
-        )
-        .expect("Timed out waiting for tenure change Stacks block");
-        btc_blocks_mined += 1;
+    for interim_block_ix in 0..inter_blocks_per_tenure {
+        info!(
+            "Mining interim block #{interim_block_ix} in Miner 1's third tenure post-fork";
+        );
-        // Unpause block commits
-        info!("Unpausing block commits");
-        rl1_skip_commit_op.set(false);
-        rl2_skip_commit_op.set(false);
+        let (_, sender_nonce) = signer_test
+            .submit_transfer_tx(&sender_sk, send_fee, send_amt)
+            .unwrap();
-        // Wait for the block to be processed and the block commits to be submitted
         wait_for(60, || {
-            let tip_after = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
-            // Ensure that both block commits have been sent before continuing
-            let commits_after_1 = commits_1.load(Ordering::SeqCst);
-            let commits_after_2 = commits_2.load(Ordering::SeqCst);
-            Ok(commits_after_1 > commits_before_1
-                && commits_after_2 > commits_before_2
-                && tip_after.consensus_hash != tip_before.consensus_hash)
+            Ok(get_account(&http_origin, &sender_addr).nonce > sender_nonce)
         })
-        .expect("Sortition DB tip did not change");
-
-        let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
-        info!("tip_after: {:?}", tip_sn);
-        let miner = match tip_sn.miner_pk_hash {
-            Some(pk_hash) => {
-                if pk_hash == mining_pkh_1 {
-                    1
-                } else {
-                    2
-                }
-            }
-            None => {
-                panic!("No sortition found");
-            }
-        };
-        info!("Next tenure mined by miner {miner}");
-
-        if let
Some(last_sortition_winner) = last_sortition_winner { - if last_sortition_winner == miner && miner == 2 { - miner_2_won_2_in_a_row = true; - } else { - miner_2_won_2_in_a_row = false; - } - } - last_sortition_winner = Some(miner); - - if miner == 1 && miner_1_tenures == 0 { - // Setup miner 2 to ignore a block in this tenure - ignore_block = pre_nakamoto_peer_1_height - + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) - + 3; - set_ignore_block(ignore_block, &conf_node_2.node.working_dir); - - // Ensure that miner 2 runs at least one more tenure - min_miner_2_tenures = miner_2_tenures + 1; - fork_initiated = true; - min_miner_2_blocks = miner_2_blocks; - } - if miner == 2 && miner_2_tenures == min_miner_2_tenures { - // This is the forking tenure. Ensure that miner 1 runs one more - // tenure after this to validate that it continues to build off of - // the proper block. - min_miner_1_tenures = miner_1_tenures + 1; - } - - let mut blocks = inter_blocks_per_tenure; - // mine (or attempt to mine) the interim blocks - for interim_block_ix in 0..inter_blocks_per_tenure { - let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); - - info!( - "Mining interim blocks"; - "fork_initiated?" => fork_initiated, - "miner_1_tenures" => miner_1_tenures, - "miner_2_tenures" => miner_2_tenures, - "min_miner_1_tenures" => min_miner_2_tenures, - "min_miner_2_tenures" => min_miner_2_tenures, - "proposed_before_2" => proposed_before_2, - "mined_before_1" => mined_before_1, - "mined_before_2" => mined_before_2, - ); - - // submit a tx so that the miner will mine an extra block - let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - // This may fail if the forking miner wins too many tenures and this account's - // nonces get too high (TooMuchChaining) - match submit_tx_fallible(&http_origin, &transfer_tx) { - Ok(_) => { - wait_for(60, || { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - - Ok((fork_initiated && proposed_2 > proposed_before_2) - || mined_1 > mined_before_1 - || mined_2 > mined_before_2 - // Special case where neither miner can mine a block: - || (fork_initiated && miner_2_won_2_in_a_row)) - }) - .expect("Timed out waiting for interim block to be mined"); - } - Err(e) => { - if e.to_string().contains("TooMuchChaining") { - info!("TooMuchChaining error, skipping block"); - blocks = interim_block_ix; - break; - } else { - panic!("Failed to submit tx: {e}"); - } - } - } - info!("Attempted to mine interim block {btc_blocks_mined}:{interim_block_ix}"); - } - - if miner == 1 { - miner_1_tenures += 1; - miner_1_blocks += blocks; - } else { - miner_2_tenures += 1; - miner_2_blocks += blocks; - } - - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - - info!( - "Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}, Miner 1 before: {mined_before_1}, Miner 2 before: {mined_before_2}, Miner 1 blocks: {mined_1}, Miner 2 blocks: {mined_2}", - ); - - if miner == 1 { - assert_eq!(mined_1, mined_before_1 + blocks + 1); - } else if miner_2_tenures < min_miner_2_tenures { - 
assert_eq!(mined_2, mined_before_2 + blocks + 1); - } else { - // Miner 2 should have mined 0 blocks after the fork - assert_eq!(mined_2, mined_before_2); - } + .unwrap(); } - info!( - "New chain info 1: {:?}", - get_chain_info(&signer_test.running_nodes.conf) - ); + info!("------- Miner 2 wins the fourth tenure post-fork ------"); + let proposals_before = rl2_counters.naka_proposed_blocks.get(); + let mined_before = rl2_counters.naka_mined_blocks.get(); + let peer_info_before = signer_test.get_peer_info(); + signer_test.mine_bitcoin_block(); + // now, miner 2 is reorging an entire miner 1 tenure, which should lead + // the signer set to treat miner 2's reorg as rejected. + signer_test.check_signer_states_reorg(&[], &signer_test.signer_test_pks()); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_2)); - info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + wait_for(60, || { + Ok(rl2_counters.naka_proposed_blocks.get() > proposals_before) + }) + .expect("Miner 2 should propose blocks that get rejected"); - let peer_1_height = get_chain_info(&conf).stacks_tip_height; - let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; - assert_eq!(peer_2_height, ignore_block - 1); - // The height may be higher than expected due to extra transactions waiting - // to be mined during the forking miner's tenure. - // We cannot guarantee due to TooMuchChaining that the miner will mine inter_blocks_per_tenure - // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2 - // before the fork was initiated - assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks); - assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); + wait_for(120, || { + Ok(signer_test.get_peer_info().stacks_tip_height > peer_info_before.stacks_tip_height) + }) + .expect("Miner 1 should submit a tenure extend and have it globally accepted"); - let sortdb = SortitionDB::open( - &conf_node_2.get_burn_db_file_path(), - false, - conf_node_2.get_burnchain().pox_constants, - ) - .unwrap(); + assert_eq!( + mined_before, + rl2_counters.naka_mined_blocks.get(), + "Miner 2 should not have mined any new blocks" + ); - let (chainstate, _) = StacksChainState::open( - false, - conf_node_2.burnchain.chain_id, - &conf_node_2.get_chainstate_path_str(), - None, - ) - .unwrap(); - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() - .unwrap(); - assert_eq!(tip.stacks_block_height, ignore_block - 1); rl2_coord_channels .lock() .expect("Mutex poisoned") @@ -5985,11 +6094,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { None, None, ); - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = signer_test.signer_test_pks(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); @@ -6159,11 +6264,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { None, None, ); - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = signer_test.signer_test_pks(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); @@ -6383,11 +6484,7 
@@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Submitted tx {tx} in to attempt to mine block N+1"); let block_n_1 = wait_for_block_proposal(30, info_before.stacks_tip_height + 1, &miner_pk) .expect("Timed out waiting for block N+1 to be proposed"); - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = signer_test.signer_test_pks(); wait_for_block_global_acceptance_from_signers( 30, &block_n_1.header.signer_signature_hash(), @@ -6544,12 +6641,7 @@ fn continue_after_fast_block_no_sortition() { let burnchain = conf_1.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - let all_signers = miners - .signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = miners.signer_test.signer_test_pks(); let get_burn_height = || { SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6837,11 +6929,7 @@ fn signing_in_0th_tenure_of_reward_cycle() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let signer_public_keys = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let signer_public_keys = signer_test.signer_test_pks(); let long_timeout = Duration::from_secs(200); signer_test.boot_to_epoch_3(); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -7109,11 +7197,7 @@ fn block_commit_delay() { .expect("Timed out waiting for block commit after new Stacks block"); // Prevent a block from being mined by making signers reject it. - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = signer_test.signer_test_pks(); TEST_REJECT_ALL_BLOCK_PROPOSAL.set(all_signers); info!("------------------------- Test Mine Burn Block -------------------------"); @@ -7379,11 +7463,7 @@ fn block_validation_check_rejection_timeout_heuristic() { ); let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); let miner_pk = StacksPublicKey::from_private(&miner_sk); - let all_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); + let all_signers = signer_test.signer_test_pks(); signer_test.boot_to_epoch_3(); @@ -7564,7 +7644,7 @@ fn block_validation_pending_table() { .expect("Failed to get pending block validations"); info!( "----- Waiting for pending block proposal in SignerDB -----"; - "proposed_signer_signature_hash" => block_signer_signature_hash.to_hex(), + "proposed_block_signer_signature_hash" => block_signer_signature_hash.to_hex(), "pending_block_validations_len" => pending_block_validations.len(), "pending_block_validations" => pending_block_validations.iter() .map(|p| p.signer_signature_hash.to_hex()) @@ -7601,11 +7681,7 @@ fn block_validation_pending_table() { .expect("Timed out waiting for pending block validation to be removed"); // for test cleanup we need to wait for block rejections - let signer_keys = signer_test - .signer_configs - .iter() - .map(|c| StacksPublicKey::from_private(&c.stacks_private_key)) - .collect::>(); + let signer_keys = signer_test.signer_test_pks(); wait_for_block_rejections_from_signers(30, &block.header.signer_signature_hash(), &signer_keys) .expect("Timed out waiting for block rejections"); @@ -8198,7 
+8274,7 @@ fn block_proposal_max_age_rejections() { let short_timeout = Duration::from_secs(30); info!("------------------------- Send Block Proposal To Signers -------------------------"); - let info_before = get_chain_info(&signer_test.running_nodes.conf); + let _ = get_chain_info(&signer_test.running_nodes.conf); let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), txs: vec![], @@ -8227,47 +8303,53 @@ fn block_proposal_max_age_rejections() { info!("------------------------- Test Block Proposal Rejected -------------------------"); // Verify the signers rejected only the SECOND block proposal. The first was not even processed. - wait_for(30, || { - let rejections = test_observer::get_stackerdb_chunks() + wait_for(120, || { + let mut status_map = HashMap::new(); + for chunk in test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) - .map(|chunk| { - let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - else { - return None; - }; - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { - signer_signature_hash, - signature, - .. - })) => { - assert_eq!( - signer_signature_hash, block_signer_signature_hash_2, - "We should only reject the second block" - ); - Some(signature) - } - SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { - signer_signature_hash, - .. - })) => { - assert_ne!( - signer_signature_hash, block_signer_signature_hash_1, - "We should never have accepted block" - ); - None - } - _ => None, + { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + .. + })) => { + let entry = status_map.entry(signer_signature_hash).or_insert((0, 0)); + entry.0 += 1; } - }); - Ok(rejections.count() > num_signers * 7 / 10) + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. 
+ })) => { + let entry = status_map.entry(signer_signature_hash).or_insert((0, 0)); + entry.1 += 1; + } + _ => continue, + } + } + let block_1_status = status_map + .get(&block_signer_signature_hash_1) + .cloned() + .unwrap_or((0, 0)); + assert_eq!(block_1_status, (0, 0)); + + let block_2_status = status_map + .get(&block_signer_signature_hash_2) + .cloned() + .unwrap_or((0, 0)); + assert_eq!(block_2_status.1, 0, "Block 2 should always be rejected"); + + info!("Block 2 status"; + "accepted" => %block_2_status.1, "rejected" => %block_2_status.0 + ); + Ok(block_2_status.0 > num_signers * 7 / 10) }) .expect("Timed out waiting for block rejections"); - info!("------------------------- Test Peer Info-------------------------"); - assert_eq!(info_before, get_chain_info(&signer_test.running_nodes.conf)); - info!("------------------------- Test Shutdown-------------------------"); signer_test.shutdown(); } @@ -8328,11 +8410,7 @@ fn global_acceptance_depends_on_block_announcement() { None, ); - let all_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); + let all_signers = signer_test.signer_test_pks(); let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); let miner_pk = StacksPublicKey::from_private(&miner_sk); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -8416,28 +8494,22 @@ fn global_acceptance_depends_on_block_announcement() { ); TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); - TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); TEST_IGNORE_SIGNERS.set(false); - TEST_SKIP_BLOCK_BROADCAST.set(false); test_observer::clear(); - let info_before = signer_test.get_peer_info(); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - let info = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info.stacks_tip_height > info_before.stacks_tip_height - && info_before.stacks_tip_consensus_hash != info.stacks_tip_consensus_hash) - }, - ) - .expect("Stacks miner failed to produce new blocks during the newest burn block's tenure"); - let sister_block = - wait_for_block_pushed_by_miner_key(30, info_before.stacks_tip_height + 1, &miner_pk) - .expect("Timed out waiting for block N+1' to be mined"); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + let sister_block = wait_for_block_proposal(30, info_before.stacks_tip_height + 1, &miner_pk) + .expect("Timed out waiting for block N+1' to be proposed"); + + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); + TEST_SKIP_BLOCK_BROADCAST.set(false); + + wait_for_block_pushed(30, sister_block.header.signer_signature_hash()) + .expect("Timed out waiting for block N+1' to be mined"); assert_ne!( sister_block.header.signer_signature_hash(), block_n_1.header.signer_signature_hash() @@ -8753,7 +8825,7 @@ fn incoming_signers_ignore_block_proposals() { info!("------------------------- Test Mine Until Middle of Prepare Phase at Block Height {middle_of_prepare_phase} -------------------------"); signer_test.run_until_burnchain_height_nakamoto(timeout, middle_of_prepare_phase, num_signers); - signer_test.wait_for_registered_both_reward_cycles(30); + signer_test.wait_for_registered_both_reward_cycles(); let current_burnchain_height = signer_test .running_nodes @@ -8841,11 +8913,7 @@ fn incoming_signers_ignore_block_proposals() { info!("------------------------- Test Attempt to Mine Invalid Block {signer_signature_hash_1} -------------------------"); let 
short_timeout = Duration::from_secs(30);
-    let all_signers: Vec<_> = signer_test
-        .signer_stacks_private_keys
-        .iter()
-        .map(StacksPublicKey::from_private)
-        .collect();
+    let all_signers = signer_test.signer_test_pks();
 
     test_observer::clear();
     // Propose a block to the signers that passes initial checks but will be rejected by the stacks node
@@ -8929,7 +8997,7 @@ fn outgoing_signers_ignore_block_proposals() {
 
     info!("------------------------- Test Mine Until Next Reward Cycle at Height {next_reward_cycle_height} -------------------------");
     signer_test.run_until_burnchain_height_nakamoto(timeout, next_reward_cycle_height, num_signers);
-    signer_test.wait_for_registered_both_reward_cycles(30);
+    signer_test.wait_for_registered_both_reward_cycles();
 
     let current_burnchain_height = signer_test
         .running_nodes
@@ -9227,7 +9295,7 @@ fn injected_signatures_are_ignored_across_boundaries() {
     .to_rsv();
 
     let signer_pk = Secp256k1PublicKey::from_private(&new_signer_private_key);
-    let stacking_tx = tests::make_contract_call(
+    let stacking_tx = make_contract_call(
         &new_signer_private_key,
         0,
         1000,
@@ -9287,7 +9355,7 @@ fn injected_signatures_are_ignored_across_boundaries() {
         &signer_test.running_nodes.counters.blocks_processed,
     );
 
-    signer_test.wait_for_registered_both_reward_cycles(60);
+    signer_test.wait_for_registered_both_reward_cycles();
 
     info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",);
     signer_test.run_until_burnchain_height_nakamoto(
@@ -9848,11 +9916,11 @@ fn fast_sortition() {
 #[test]
 #[ignore]
 /// This test spins up two nakamoto nodes, both configured to mine.
-/// After Nakamoto blocks are mined, it waits for a normal tenure, then issues
+/// After Nakamoto blocks are mined, it issues a normal tenure, then issues
 /// two bitcoin blocks in quick succession -- the first will contain block commits,
 /// and the second "flash block" will contain no block commits.
-/// The test checks if the winner of the first block is different than the previous tenure.
-/// If so, it performs the actual test: asserting that the miner wakes up and produces valid blocks.
+/// The test asserts that the winner of the first block is different from the winner of the previous tenure,
+/// and then performs the actual test: asserting that the miner wakes up and produces valid blocks.
 /// This test uses the burn-block-height to ensure consistent calculation of the burn view between
 /// the miner thread and the block processor
 fn multiple_miners_empty_sortition() {
@@ -9861,166 +9929,122 @@
     }
 
     let num_signers = 5;
-    let mut miners = MultipleMinerTest::new(num_signers, 60);
+    let mut miners = MultipleMinerTest::new_with_config_modifications(
+        num_signers,
+        60,
+        |signer_config| {
+            // We don't want the miner of the "inactive" sortition before the flash block
+            // to get timed out.
+ signer_config.block_proposal_timeout = Duration::from_secs(600); + }, + |_| {}, + |_| {}, + ); - let (conf_1, conf_2) = miners.get_node_configs(); + let (conf_1, _conf_2) = miners.get_node_configs(); - let rl1_commits = miners - .signer_test - .running_nodes - .counters - .naka_submitted_commits - .clone(); let rl1_counters = miners.signer_test.running_nodes.counters.clone(); - let rl2_commits = miners.rl2_counters.naka_submitted_commits.clone(); - let rl2_counters = miners.rl2_counters.clone(); + let sortdb = SortitionDB::open( + &conf_1.get_burn_db_file_path(), + false, + conf_1.get_burnchain().pox_constants, + ) + .unwrap(); - let sender_addr = tests::to_addr(&miners.sender_sk); + miners.pause_commits_miner_2(); + let (mining_pkh_1, mining_pkh_2) = miners.get_miner_public_key_hashes(); miners.boot_to_epoch_3(); - let burn_height_contract = " - (define-data-var local-burn-block-ht uint u0) - (define-public (run-update) - (ok (var-set local-burn-block-ht burn-block-height))) - "; - - let contract_tx = make_contract_publish( - &miners.sender_sk, - miners.sender_nonce, - 1000, - conf_1.burnchain.chain_id, - "burn-height-local", - burn_height_contract, - ); - submit_tx(&conf_1.node.data_url, &contract_tx); - miners.sender_nonce += 1; - - let last_sender_nonce = loop { - // Mine 1 nakamoto tenures - info!("Mining tenure..."); - - miners.signer_test.mine_block_wait_on_processing( - &[&conf_1, &conf_2], - &[&rl1_counters, &rl2_counters], - Duration::from_secs(30), - ); + let info = get_chain_info(&conf_1); - // mine the interim blocks - for _ in 0..2 { - let sender_nonce = get_account(&conf_1.node.data_url, &sender_addr).nonce; - // check if the burn contract is already produced, if not wait for it to be included in - // an interim block - if sender_nonce >= 1 { - let contract_call_tx = make_contract_call( - &miners.sender_sk, - sender_nonce, - miners.send_fee, - conf_1.burnchain.chain_id, - &sender_addr, - "burn-height-local", - "run-update", - &[], - ); - submit_tx(&conf_1.node.data_url, &contract_call_tx); - } + miners + .signer_test + .submit_burn_block_contract_and_wait(&miners.sender_sk) + .expect("Timed out waiting for contract publish"); - // make sure the sender's tx gets included (whether it was the contract publish or call) - wait_for(60, || { - let next_sender_nonce = get_account(&conf_1.node.data_url, &sender_addr).nonce; - Ok(next_sender_nonce > sender_nonce) - }) - .unwrap(); - } + wait_for(60, || { + Ok( + rl1_counters.naka_submitted_commit_last_burn_height.get() >= info.burn_block_height + && rl1_counters.naka_submitted_commit_last_stacks_tip.get() + >= info.stacks_tip_height, + ) + }) + .expect("Timed out waiting for commits from Miner 1 for Tenure 1 of the test"); - let last_active_sortition = get_sortition_info(&conf_1); - assert!(last_active_sortition.was_sortition); + for _ in 0..2 { + miners + .signer_test + .submit_burn_block_call_and_wait(&miners.sender_sk) + .expect("Timed out waiting for contract-call"); + } - // check if we're about to cross a reward cycle boundary -- if so, we can't - // perform this test, because we can't tenure extend across the boundary - let pox_info = get_pox_info(&conf_1.node.data_url).unwrap(); - let blocks_until_next_cycle = pox_info.next_cycle.blocks_until_reward_phase; - if blocks_until_next_cycle == 1 { - info!("We're about to cross a reward cycle boundary, cannot perform a tenure extend here!"); - continue; - } + let tenure_0_stacks_height = get_chain_info(&conf_1).stacks_tip_height; + miners.pause_commits_miner_1(); + 
miners.signer_test.mine_bitcoin_block(); + miners.signer_test.check_signer_states_normal(); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_1)); - // lets mine a btc flash block - let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); - let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); - let info_before = get_chain_info(&conf_1); + wait_for(60, || { + Ok(get_chain_info(&conf_1).stacks_tip_height > tenure_0_stacks_height) + }) + .expect("Timed out waiting for Miner 1 to mine the first block of Tenure 1"); + miners.submit_commit_miner_2(&sortdb); - miners.btc_regtest_controller_mut().build_next_block(2); + for _ in 0..2 { + miners + .signer_test + .submit_burn_block_call_and_wait(&miners.sender_sk) + .expect("Timed out waiting for contract-call"); + } - wait_for(60, || { - let info = get_chain_info(&conf_1); - Ok(info.burn_block_height >= 2 + info_before.burn_block_height - && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before - && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) - }) - .unwrap(); + let last_active_sortition = get_sortition_info(&conf_1); + assert!(last_active_sortition.was_sortition); - let cur_empty_sortition = get_sortition_info(&conf_1); - assert!(!cur_empty_sortition.was_sortition); - let inactive_sortition = get_sortition_info_ch( - &conf_1, - cur_empty_sortition.last_sortition_ch.as_ref().unwrap(), - ); - assert!(inactive_sortition.was_sortition); - assert_eq!( - inactive_sortition.burn_block_height, - last_active_sortition.burn_block_height + 1 - ); + let tenure_1_info = get_chain_info(&conf_1); + info!("Mining flash block!"); + miners.btc_regtest_controller_mut().build_next_block(2); - info!("==================== Mined a flash block ===================="); - info!("Flash block sortition info"; - "last_active_winner" => ?last_active_sortition.miner_pk_hash160, - "last_winner" => ?inactive_sortition.miner_pk_hash160, - "last_active_ch" => %last_active_sortition.consensus_hash, - "last_winner_ch" => %inactive_sortition.consensus_hash, - "cur_empty_sortition" => %cur_empty_sortition.consensus_hash, - ); + wait_for(60, || { + let info = get_chain_info(&conf_1); + Ok(info.burn_block_height >= 2 + tenure_1_info.burn_block_height) + }) + .expect("Timed out waiting for the flash blocks to be processed by the stacks nodes"); - if last_active_sortition.miner_pk_hash160 != inactive_sortition.miner_pk_hash160 { - info!( - "==================== Mined a flash block with changed miners ====================" - ); - break get_account(&conf_1.node.data_url, &sender_addr).nonce; - } - }; + let cur_empty_sortition = get_sortition_info(&conf_1); + assert!(!cur_empty_sortition.was_sortition); + let inactive_sortition = get_sortition_info_ch( + &conf_1, + cur_empty_sortition.last_sortition_ch.as_ref().unwrap(), + ); + assert!(inactive_sortition.was_sortition); + assert_eq!( + inactive_sortition.burn_block_height, + last_active_sortition.burn_block_height + 1 + ); + assert_eq!( + inactive_sortition.miner_pk_hash160, + Some(mining_pkh_2), + "Miner 2 should have won the inactive sortition" + ); // after the flash block, make sure we get block processing without a new bitcoin block // being mined. 
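The hunk below removes the last inline copy of this contract-call-and-wait logic in favor of `submit_burn_block_call_and_wait`. Judging from the deleted lines, the helper plausibly amounts to the following sketch; the method placement, the fixed fee, and the exact return type are assumptions for illustration, not the repo's definitive implementation:

// Hypothetical reconstruction based on the deleted inline code
// (assumed to live in impl SignerTest<SpawnedSigner>):
fn submit_burn_block_call_and_wait(&self, sender_sk: &Secp256k1PrivateKey) -> Result<(), String> {
    let conf = &self.running_nodes.conf;
    let sender_addr = tests::to_addr(sender_sk);
    let sender_nonce = get_account(&conf.node.data_url, &sender_addr).nonce;
    // call (contract-call? .burn-height-local run-update), as the removed code did
    let contract_call_tx = make_contract_call(
        sender_sk,
        sender_nonce,
        1000, // fee; the removed code used the test's configured send_fee
        conf.burnchain.chain_id,
        &sender_addr,
        "burn-height-local",
        "run-update",
        &[],
    );
    submit_tx(&conf.node.data_url, &contract_call_tx);
    // treat the call as complete once the sender's nonce advances
    wait_for(60, || {
        Ok(get_account(&conf.node.data_url, &sender_addr).nonce > sender_nonce)
    })
}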
- for _ in 0..2 { - let sender_nonce = get_account(&conf_1.node.data_url, &sender_addr).nonce; - let contract_call_tx = make_contract_call( - &miners.sender_sk, - sender_nonce, - miners.send_fee, - conf_1.burnchain.chain_id, - &sender_addr, - "burn-height-local", - "run-update", - &[], - ); - submit_tx(&conf_1.node.data_url, &contract_call_tx); - - wait_for(60, || { - let next_sender_nonce = get_account(&conf_1.node.data_url, &sender_addr).nonce; - Ok(next_sender_nonce > sender_nonce) - }) - .unwrap(); + miners + .signer_test + .submit_burn_block_call_and_wait(&miners.sender_sk) + .expect("Timed out waiting for contract-call"); } - assert_eq!( - get_account(&conf_1.node.data_url, &sender_addr).nonce, - last_sender_nonce + 2, - "The last two transactions after the flash block must be included in a block" - ); - miners.shutdown(); -} + miners + .signer_test + .check_signer_states_normal_missed_sortition(); + + miners.shutdown(); +} #[test] #[ignore] @@ -10051,21 +10075,9 @@ fn single_miner_empty_sortition() { info!("------------------------- Reached Epoch 3.0 -------------------------"); - let burn_height_contract = " - (define-data-var local-burn-block-ht uint u0) - (define-public (run-update) - (ok (var-set local-burn-block-ht burn-block-height))) - "; - - let contract_tx = make_contract_publish( - &sender_sk, - 0, - 1000, - conf.burnchain.chain_id, - "burn-height-local", - burn_height_contract, - ); - submit_tx(&conf.node.data_url, &contract_tx); + signer_test + .submit_burn_block_contract_and_wait(&sender_sk) + .expect("Timed out waiting for contract publish"); let rl1_commits = signer_test .running_nodes @@ -10087,29 +10099,9 @@ fn single_miner_empty_sortition() { // mine the interim blocks for _ in 0..2 { - let sender_nonce = get_account(&conf.node.data_url, &sender_addr).nonce; - // check if the burn contract is already produced, if not wait for it to be included in - // an interim block - if sender_nonce >= 1 { - let contract_call_tx = make_contract_call( - &sender_sk, - sender_nonce, - send_fee, - conf.burnchain.chain_id, - &sender_addr, - "burn-height-local", - "run-update", - &[], - ); - submit_tx(&conf.node.data_url, &contract_call_tx); - } - - // make sure the sender's tx gets included (whether it was the contract publish or call) - wait_for(60, || { - let next_sender_nonce = get_account(&conf.node.data_url, &sender_addr).nonce; - Ok(next_sender_nonce > sender_nonce) - }) - .unwrap(); + signer_test + .submit_burn_block_call_and_wait(&sender_sk) + .expect("Timed out waiting for contract-call"); } let last_active_sortition = get_sortition_info(&conf); @@ -10718,12 +10710,7 @@ fn interrupt_miner_on_new_stacks_tip() { let (miner_pk_1, miner_pk_2) = miners.get_miner_public_keys(); let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes(); - let all_signers: Vec<_> = miners - .signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); + let all_signers = miners.signer_test.signer_test_pks(); // Pause Miner 2's commits to ensure Miner 1 wins the first sortition. 
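    // (the naka_skip_commit_op flag toggled below makes the run loop skip submitting
    // block-commits, so the paused miner cannot win a sortition until it is cleared)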
    skip_commit_op_rl2.set(true);
@@ -10747,7 +10734,7 @@ fn interrupt_miner_on_new_stacks_tip() {
     TEST_SKIP_BLOCK_BROADCAST.set(true);
 
     // submit a tx so that the miner will mine a stacks block
-    let tx = miners.send_transfer_tx();
+    let (tx, _) = miners.send_transfer_tx();
 
     // Wait for the block with this transfer to be accepted
     wait_for(30, || {
         Ok(test_observer::get_mined_nakamoto_blocks()
@@ -11948,12 +11935,7 @@ fn mark_miner_as_invalid_if_reorg_is_rejected() {
             config.miner.block_commit_delay = Duration::from_secs(0);
         },
     );
-    let all_signers = miners
-        .signer_test
-        .signer_stacks_private_keys
-        .iter()
-        .map(StacksPublicKey::from_private)
-        .collect::<Vec<_>>();
+    let all_signers = miners.signer_test.signer_test_pks();
     let mut approving_signers = vec![];
     let mut rejecting_signers = vec![];
     for (i, signer_config) in miners.signer_test.signer_configs.iter().enumerate() {
@@ -11997,6 +11979,8 @@
         .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 30)
         .expect("Failed to mine BTC block followed by Block N");
     verify_sortition_winner(&sortdb, &miner_pkh_1);
+    miners.signer_test.check_signer_states_normal();
+
     let block_n =
         wait_for_block_pushed_by_miner_key(30, info_before.stacks_tip_height + 1, &miner_pk_1)
             .expect("Failed to get block N");
@@ -12022,6 +12006,7 @@
         .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 30)
         .expect("Failed to mine BTC block");
     verify_sortition_winner(&sortdb, &miner_pkh_2);
+    miners.signer_test.check_signer_states_normal();
 
     info!("------------------------- Miner 1 Submits a Block Commit -------------------------");
     miners.submit_commit_miner_1(&sortdb);
@@ -12044,11 +12029,16 @@
     miners
         .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 30)
         .expect("Failed to mine BTC block");
+
     let block_n_1_prime = wait_for_block_proposal(30, block_n_height + 1, &miner_pk_1)
         .expect("Failed to get block proposal N+1'");
     // Stall the miner from proposing again until we're ready
     TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk_1]);
+    miners
+        .signer_test
+        .check_signer_states_reorg(&approving_signers, &rejecting_signers);
+
     info!("------------------------- Wait for 3 acceptances and 2 rejections -------------------------");
     let signer_signature_hash = block_n_1_prime.header.signer_signature_hash();
     wait_for_block_acceptance_from_signers(30, &signer_signature_hash, &approving_signers)
@@ -12085,3 +12075,1304 @@ fn mark_miner_as_invalid_if_reorg_is_rejected() {
     }
     miners.shutdown();
 }
+
+#[test]
+#[ignore]
+/// This test checks that the miner ignores repeated block rejections.
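+/// One signer (signer[0]) rejects every proposal and, via TEST_REPEAT_PROPOSAL_RESPONSE,
+/// re-sends that rejection, while a second signer (signer[1]) ignores proposals
+/// entirely. The test then asserts that the miner, having already counted the
+/// rejection once, does not propose another block in response to the duplicates.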
+#[test]
+#[ignore]
+/// This test checks that the miner ignores repeated block rejections.
+fn repeated_rejection() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::random();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let mut signer_test: SignerTest<SpawnedSigner> =
+        SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 3)]);
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    signer_test.boot_to_epoch_3();
+
+    let proposed_blocks = signer_test
+        .running_nodes
+        .counters
+        .naka_proposed_blocks
+        .clone();
+
+    signer_test.mine_nakamoto_block(Duration::from_secs(60), true);
+
+    // make signer[0] reject all proposals and repeat the rejection
+    let rejecting_signer =
+        StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[0]);
+    TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![rejecting_signer]);
+    TEST_REPEAT_PROPOSAL_RESPONSE.set(vec![rejecting_signer]);
+
+    // make signer[1] ignore all proposals
+    let ignoring_signer = StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[1]);
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ignoring_signer]);
+
+    let proposals_before = proposed_blocks.load(Ordering::SeqCst);
+
+    // submit a tx so that the miner will mine a block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        0,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    info!("Submitted transfer tx and waiting for block proposal");
+    wait_for(60, || {
+        if proposed_blocks.load(Ordering::SeqCst) > proposals_before {
+            return Ok(true);
+        }
+        Ok(false)
+    })
+    .expect("Timed out waiting for block proposal");
+
+    let proposals_after = proposed_blocks.load(Ordering::SeqCst);
+    info!("Block proposed, verifying that it is not rejected");
+
+    // Ensure that the miner does not propose any more blocks
+    _ = wait_for(60, || {
+        assert_eq!(
+            proposed_blocks.load(Ordering::SeqCst),
+            proposals_after,
+            "Miner proposed another block"
+        );
+        Ok(false)
+    });
+
+    signer_test.shutdown();
+}
+
+fn transfers_in_block(block: &serde_json::Value) -> usize {
+    let transactions = block["transactions"].as_array().unwrap();
+    let mut count = 0;
+    for tx in transactions {
+        let raw_tx = tx["raw_tx"].as_str().unwrap();
+        let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
+        let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
+        if let TransactionPayload::TokenTransfer(..) = &parsed.payload {
+            // don't count phantom unlock transactions (identified as transfers from the boot addr)
+            if !parsed.get_origin().address_testnet().is_boot_code_addr() {
+                count += 1;
+            }
+        }
+    }
+    count
+}
+
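+// `wait_for` (used throughout these tests) is assumed to poll a fallible
+// condition closure until it returns Ok(true), an Err, or the timeout (in
+// seconds) elapses. A minimal sketch of that polling contract (illustrative
+// only; the real helper lives in the shared test utilities):
+#[allow(dead_code)]
+fn wait_for_sketch<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
+where
+    F: FnMut() -> Result<bool, String>,
+{
+    let deadline = std::time::Instant::now() + std::time::Duration::from_secs(timeout_secs);
+    while std::time::Instant::now() < deadline {
+        // Propagate a hard failure immediately; otherwise keep polling.
+        if check()? {
+            return Ok(());
+        }
+        std::thread::sleep(std::time::Duration::from_millis(500));
+    }
+    Err("Timed out waiting for check to pass".into())
+}
+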
+#[test]
+#[ignore]
+/// This test verifies that a miner will re-propose the same block if it times
+/// out waiting for signers to reach consensus on the block.
+///
+/// Spins up a miner and five signers, makes one signer reject proposals and
+/// another ignore them so that no consensus is reached, then verifies that
+/// the miner retries the original proposal and it is eventually approved.
+fn retry_proposal() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::random();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, (send_amt + send_fee) * 3)],
+        |_| {},
+        |config| {
+            config.miner.block_rejection_timeout_steps.clear();
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(0, Duration::from_secs(123));
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(10, Duration::from_secs(20));
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(15, Duration::from_secs(10));
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(20, Duration::from_secs(30));
+        },
+        None,
+        None,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    signer_test.boot_to_epoch_3();
+
+    let proposed_blocks = signer_test
+        .running_nodes
+        .counters
+        .naka_proposed_blocks
+        .clone();
+
+    signer_test.mine_nakamoto_block(Duration::from_secs(60), true);
+
+    let info = get_chain_info(&signer_test.running_nodes.conf);
+    let block_height_before = info.stacks_tip_height;
+
+    // make signer[0] reject all proposals
+    let rejecting_signer =
+        StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[0]);
+    TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![rejecting_signer]);
+
+    // make signer[1] ignore all proposals
+    let ignoring_signer = StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[1]);
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ignoring_signer]);
+
+    let proposals_before = proposed_blocks.load(Ordering::SeqCst);
+
+    // submit a tx so that the miner will mine a block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        0,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    info!("Submitted transfer tx and waiting for block proposal");
+    wait_for(60, || {
+        if proposed_blocks.load(Ordering::SeqCst) > proposals_before {
+            return Ok(true);
+        }
+        Ok(false)
+    })
+    .expect("Timed out waiting for block proposal");
+
+    info!(
+        "Block proposed, submitting another transaction that should not get included in the block"
+    );
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        1,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    info!("Disable signer 1 from ignoring proposals");
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![]);
+
+    info!("Waiting for the block to be approved");
+    wait_for(60, || {
+        let blocks = test_observer::get_blocks();
+        let last_block = blocks.last().expect("No blocks found");
+        let height = last_block["block_height"].as_u64().unwrap();
+        if height > block_height_before {
+            return Ok(true);
+        }
+        Ok(false)
+    })
+    .expect("Timed out waiting for block");
+
+    // Ensure that the block was the original block with just 1 transfer
+    let blocks = test_observer::get_blocks();
+    let block = blocks.last().expect("No blocks found");
+    assert_eq!(transfers_in_block(block), 1);
+
+    signer_test.shutdown();
+}
+
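+// The block_rejection_timeout_steps table configured above is assumed to map
+// a rejection count to how long the miner waits before re-proposing: the
+// miner presumably uses the entry with the largest key not exceeding the
+// number of rejections seen so far, e.g. with steps {0: 123s, 10: 20s,
+// 15: 10s, 20: 30s}, 12 rejections would select the 20s timeout. A minimal
+// illustrative lookup (the concrete field type in the miner config may
+// differ):
+#[allow(dead_code)]
+fn rejection_timeout_sketch(
+    steps: &std::collections::BTreeMap<u32, std::time::Duration>,
+    rejections: u32,
+) -> Option<std::time::Duration> {
+    // Largest configured step that is <= the observed rejection count.
+    steps.range(..=rejections).next_back().map(|(_, d)| *d)
+}
+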
+#[test]
+#[ignore]
+/// This test verifies that a signer will accept a rejected block if it is
+/// re-proposed and determined to be legitimate. This can happen if the block
+/// is initially rejected due to a test flag or because the stacks-node had
+/// not yet processed the block's parent.
+fn signer_can_accept_rejected_block() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::random();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, (send_amt + send_fee) * 3)],
+        |_| {},
+        |config| {
+            config.miner.block_rejection_timeout_steps.clear();
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(0, Duration::from_secs(123));
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(10, Duration::from_secs(20));
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(15, Duration::from_secs(10));
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(20, Duration::from_secs(30));
+        },
+        None,
+        None,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap();
+    let miner_pk = StacksPublicKey::from_private(&miner_sk);
+
+    signer_test.boot_to_epoch_3();
+
+    signer_test.mine_nakamoto_block(Duration::from_secs(60), true);
+
+    let info = get_chain_info(&signer_test.running_nodes.conf);
+    let block_height_before = info.stacks_tip_height;
+
+    // make signer[0] reject all proposals
+    let rejecting_signer =
+        StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[0]);
+    TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![rejecting_signer]);
+
+    // make signer[1] ignore all proposals
+    let ignoring_signer = StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[1]);
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ignoring_signer]);
+
+    // Stall block validation so we can ensure the timing we want to test
+    TEST_VALIDATE_STALL.set(true);
+
+    // submit a tx so that the miner will mine a block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        0,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    info!("Submitted transfer tx and waiting for block proposal");
+    let block = wait_for_block_proposal(30, block_height_before + 1, &miner_pk)
+        .expect("Timed out waiting for block proposal");
+    let expected_block_height = block.header.chain_length;
+
+    // Wait for signer[0] to reject the block
+    wait_for_block_rejections(30, block.header.signer_signature_hash(), 1)
+        .expect("Failed to get expected rejections for Miner 1's block");
+
+    info!("Disable signer 0 from rejecting proposals");
+    test_observer::clear();
+    TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]);
+
+    // Unstall the other signers
+    TEST_VALIDATE_STALL.set(false);
+
+    info!(
+        "Block proposed, submitting another transaction that should not get included in the block"
+    );
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        1,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    info!("Waiting for the block to be approved");
+    wait_for(60, || {
+        let blocks = test_observer::get_blocks();
+
+        // Look for a block with the expected height
+        let Some(block) = blocks
+            .iter()
+            .find(|block| block["block_height"].as_u64() == Some(expected_block_height))
+        else {
+            return Ok(false); // Keep waiting if the block hasn't appeared yet
+        };
+
+        let transfers_included_in_block = transfers_in_block(block);
+        if transfers_included_in_block == 1 {
+            Ok(true) // Success: found the block with exactly 1 transfer
+        } else {
+            Err(format!("Unexpected number of transfers included in block. Found: {transfers_included_in_block}"))
+        }
+    })
+    .expect("Timed out waiting for block");
+
+    signer_test.shutdown();
+}
+
+/// Test a scenario where:
+/// Two miners boot to Nakamoto (first miner has max_execution_time set to 0).
+/// Sortition occurs. Miner 1 wins.
+/// Miner 1 successfully mines block N with contract-publish
+/// Miner 1 successfully mines block N+1 with transfer and a contract-call that gets rejected (by max_execution_time)
+/// Miner 1 successfully mines block N+2 with transfer tx (this is mainly for ensuring everything still works after the expiration time)
+/// Sortition occurs. Miner 2 wins.
+/// Miner 2 successfully mines block N+3 including the contract-call previously rejected by miner 1
+/// Ensures both miners are aligned
+#[test]
+#[ignore]
+fn miner_rejection_by_contract_call_execution_time_expired() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let num_signers = 5;
+    let num_txs = 3;
+
+    let mut miners = MultipleMinerTest::new_with_config_modifications(
+        num_signers,
+        num_txs,
+        |signer_config| {
+            // Let's make sure we never time out, since we need to stall some things to force our scenario
+            signer_config.block_proposal_validation_timeout = Duration::from_secs(1800);
+            signer_config.tenure_last_block_proposal_timeout = Duration::from_secs(1800);
+            signer_config.first_proposal_burn_block_timing = Duration::from_secs(1800);
+        },
+        |config| config.miner.max_execution_time_secs = Some(0),
+        |config| config.miner.max_execution_time_secs = None,
+    );
+    let rl1_skip_commit_op = miners
+        .signer_test
+        .running_nodes
+        .counters
+        .naka_skip_commit_op
+        .clone();
+    let rl2_skip_commit_op = miners.rl2_counters.naka_skip_commit_op.clone();
+
+    let (conf_1, _) = miners.get_node_configs();
+    let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes();
+    let (_miner_pk_1, miner_pk_2) = miners.get_miner_public_keys();
+
+    info!("------------------------- Pause Miner 2's Block Commits -------------------------");
+
+    // Make sure Miner 2 cannot win a sortition at first.
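+    // (naka_skip_commit_op is assumed to gate the relayer's block-commit
+    // submission: while the flag is set, the miner submits no commits and so
+    // cannot win a sortition until it is re-enabled, e.g. via
+    // submit_commit_miner_2 further below.)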
+    rl2_skip_commit_op.set(true);
+
+    miners.boot_to_epoch_3();
+
+    let burnchain = conf_1.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+    info!("------------------------- Pause Miner 1's Block Commits -------------------------");
+    rl1_skip_commit_op.set(true);
+
+    info!("------------------------- Miner 1 Mines a Nakamoto Block N -------------------------");
+    miners
+        .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60)
+        .expect("Failed to mine BTC block followed by Block N");
+
+    miners.wait_for_test_observer_blocks(60);
+
+    // First, let's deploy the contract
+    let dummy_contract_src = "(define-public (dummy (number uint)) (begin (ok (+ number u1))))";
+
+    let sender_nonce = 0;
+
+    let _ = miners
+        .send_and_mine_contract_publish(sender_nonce, "dummy-contract", dummy_contract_src, 60)
+        .expect("Failed to publish contract in a new block");
+
+    info!("------------------------- Miner 1 Mines a Nakamoto Block N+1 -------------------------");
+
+    let stacks_height_before = miners.get_peer_stacks_tip_height();
+
+    let (tx1, sender_nonce) = miners.send_transfer_tx();
+
+    // try calling the contract (has to fail)
+    let contract_call_txid = miners.send_contract_call(
+        sender_nonce + 1,
+        "dummy-contract",
+        "dummy",
+        &[clarity::vm::Value::UInt(1)],
+    );
+
+    let _ = wait_for(60, || {
+        Ok(miners.get_peer_stacks_tip_height() > stacks_height_before)
+    });
+
+    miners.wait_for_test_observer_blocks(60);
+
+    assert!(last_block_contains_txid(&tx1));
+    assert!(!last_block_contains_txid(&contract_call_txid));
+
+    info!("------------------------- Miner 1 Mines a Nakamoto Block N+2 -------------------------");
+
+    let tx2 = miners
+        .send_and_mine_transfer_tx(60)
+        .expect("Failed to mine N + 2");
+
+    miners.wait_for_test_observer_blocks(60);
+
+    assert!(last_block_contains_txid(&tx2));
+
+    verify_sortition_winner(&sortdb, &miner_pkh_1);
+
+    info!("------------------------- Miner 2 Submits a Block Commit -------------------------");
+    miners.submit_commit_miner_2(&sortdb);
+
+    info!("------------------------- Mine Tenure -------------------------");
+    miners
+        .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60)
+        .expect("Failed to mine BTC block followed by Block N+3");
+
+    info!("------------------------- Miner 2 Mines Block N+3 -------------------------");
+
+    let stacks_height_before = miners.get_peer_stacks_tip_height();
+
+    let contract_call_txid = miners.send_contract_call(
+        sender_nonce + 2,
+        "dummy-contract",
+        "dummy",
+        &[clarity::vm::Value::UInt(1)],
+    );
+
+    let _ = wait_for_block_pushed_by_miner_key(30, stacks_height_before + 1, &miner_pk_2)
+        .expect("Failed to get block N+3");
+
+    miners.wait_for_test_observer_blocks(60);
+
+    assert!(last_block_contains_txid(&contract_call_txid));
+
+    verify_sortition_winner(&sortdb, &miner_pkh_2);
+
+    // ensure both miners are aligned
+    miners.wait_for_chains(60);
+
+    info!("------------------------- Shutdown -------------------------");
+    miners.shutdown();
+}
+
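+// With max_execution_time_secs = Some(0), every Clarity evaluation is assumed
+// to exceed its time budget immediately, so miner 1 drops the contract-call
+// from its blocks while plain transfers still go through. A sketch of the
+// assumed check (illustrative; not the node's actual implementation):
+#[allow(dead_code)]
+fn exceeds_execution_budget_sketch(
+    started: std::time::Instant,
+    budget: Option<std::time::Duration>,
+) -> bool {
+    match budget {
+        // Some(0) means any measurable execution time is already too long.
+        Some(max) => started.elapsed() > max,
+        // None disables the time check entirely (miner 2's configuration).
+        None => false,
+    }
+}
+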
+/// Test a scenario where:
+/// Two miners boot to Nakamoto (first miner has max_execution_time set to 0).
+/// Sortition occurs. Miner 1 wins.
+/// Miner 1 fails to mine block N with contract-publish
+/// Sortition occurs. Miner 2 wins.
+/// Miner 2 successfully mines block N including the contract-publish previously rejected by miner 1
+/// Ensures both miners are aligned
+#[test]
+#[ignore]
+fn miner_rejection_by_contract_publish_execution_time_expired() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let num_signers = 5;
+    let num_txs = 3;
+
+    let mut miners = MultipleMinerTest::new_with_config_modifications(
+        num_signers,
+        num_txs,
+        |signer_config| {
+            // Let's make sure we never time out, since we need to stall some things to force our scenario
+            signer_config.block_proposal_validation_timeout = Duration::from_secs(1800);
+            signer_config.tenure_last_block_proposal_timeout = Duration::from_secs(1800);
+            signer_config.first_proposal_burn_block_timing = Duration::from_secs(1800);
+        },
+        |config| config.miner.max_execution_time_secs = Some(0),
+        |config| config.miner.max_execution_time_secs = None,
+    );
+    let rl1_skip_commit_op = miners
+        .signer_test
+        .running_nodes
+        .counters
+        .naka_skip_commit_op
+        .clone();
+    let rl2_skip_commit_op = miners.rl2_counters.naka_skip_commit_op.clone();
+
+    let (conf_1, _) = miners.get_node_configs();
+    let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes();
+    let (_miner_pk_1, _) = miners.get_miner_public_keys();
+
+    info!("------------------------- Pause Miner 2's Block Commits -------------------------");
+
+    // Make sure Miner 2 cannot win a sortition at first.
+    rl2_skip_commit_op.set(true);
+
+    miners.boot_to_epoch_3();
+
+    let burnchain = conf_1.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+    info!("------------------------- Pause Miner 1's Block Commits -------------------------");
+    rl1_skip_commit_op.set(true);
+
+    info!("------------------------- Miner 1 Mines a Nakamoto Block N -------------------------");
+    miners
+        .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60)
+        .expect("Failed to mine BTC block followed by Block N");
+
+    miners.wait_for_test_observer_blocks(60);
+
+    // First, let's deploy the contract
+    let dummy_contract_src =
+        "(define-public (dummy (number uint)) (begin (ok (+ number u1))))(+ 1 1)";
+
+    let (tx1, sender_nonce) = miners.send_transfer_tx();
+
+    let _ = miners
+        .send_and_mine_contract_publish(sender_nonce + 1, "dummy-contract", dummy_contract_src, 60)
+        .expect_err("Expected an error while publishing contract in a new block");
+
+    assert!(last_block_contains_txid(&tx1));
+
+    verify_sortition_winner(&sortdb, &miner_pkh_1);
+
+    info!("------------------------- Miner 2 Submits a Block Commit -------------------------");
+    miners.submit_commit_miner_2(&sortdb);
+
+    info!("------------------------- Mine Tenure -------------------------");
+    miners
+        .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60)
+        .expect("Failed to mine BTC block followed by Block N+1");
+
+    info!("------------------------- Miner 2 Mines Block N+1 -------------------------");
+
+    let _ = miners
+        .send_and_mine_contract_publish(sender_nonce + 1, "dummy-contract", dummy_contract_src, 60)
+        .expect("Failed to publish contract in a new block");
+
+    verify_sortition_winner(&sortdb, &miner_pkh_2);
+
+    // ensure both miners are aligned
+    miners.wait_for_chains(60);
+
+    info!("------------------------- Shutdown -------------------------");
+    miners.shutdown();
+}
+
+/// This function intends to check the timing of the mempool iteration when
+/// there are a large number of transactions in the mempool. It will boot to
+/// epoch 3, fan out some STX transfers to a large number of accounts, wait for
+/// these to all be mined, and then pause block mining, and submit a large
+/// number of transactions to the mempool. It will then unpause block mining
+/// and wait for the first block to be mined. Since the default miner
+/// configuration specifies to spend 5 seconds mining a block, we expect that
+/// this first block should be proposed within 10 seconds and approved within
+/// 20 seconds. We also verify that the block contains at least 5,000
+/// transactions, since a lower count than that would indicate a clear
+/// regression. Several tests below call this function, testing different
+/// strategies and fees.
+fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let transfer_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // Start with 10 accounts with initial balances.
+    let initial_sender_sks = (0..10)
+        .map(|_| StacksPrivateKey::random())
+        .collect::<Vec<_>>();
+    let initial_sender_addrs = initial_sender_sks
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect::<Vec<_>>();
+
+    // These 10 accounts will send to 25 accounts each, then those 260 accounts
+    // will send to 25 accounts each, for a total of 6760 accounts.
+    // At the end of the funding round, we want to have 6760 accounts with
+    // enough balance to send 1 uSTX 25 times.
+    // With a fee of 180 to 2000 uSTX per send, we need each account to have
+    // 2001 * 25 = 50_025 uSTX.
+    // The 260 accounts in the middle will need to have enough to send that
+    // amount to 25 other accounts, plus the fee, and then enough to send the
+    // transfers themselves as well:
+    // (50025 + 180) * 25 + 50025 = 1_305_150 uSTX.
+    // The 10 initial accounts will need to have enough to send that amount to
+    // 25 other accounts, plus enough to send the transfers themselves as well:
+    // (1305150 + 180) * 25 + 1305150 = 33_938_400 uSTX.
+    let initial_balance = 33_938_400;
+    let initial_balances = initial_sender_addrs
+        .iter()
+        .map(|addr| (addr.clone(), initial_balance))
+        .collect::<Vec<_>>();
+
+    let num_signers = 5;
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        initial_balances,
+        |_| {},
+        |conf| {
+            conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+            conf.miner.mempool_walk_strategy = strategy;
+        },
+        None,
+        None,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    signer_test.boot_to_epoch_3();
+
+    // This will hold tuples for all of our senders, with the sender key and
+    // the nonce
+    let mut senders = initial_sender_sks
+        .iter()
+        .map(|sk| (sk, 0))
+        .collect::<Vec<_>>();
+
+    let mempool_db_path = format!(
+        "{}/nakamoto-neon/chainstate/mempool.sqlite",
+        signer_test.running_nodes.conf.node.working_dir
+    );
+    let chain_id = signer_test.running_nodes.conf.burnchain.chain_id;
+    let burnchain = signer_test.running_nodes.conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+
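+    // Worked check of the funding plan described in the comments above
+    // (const-evaluated; the amounts simply restate those comments):
+    // leaf accounts: 25 sends of 1 uSTX at a worst-case fee of 2000 uSTX.
+    const LEAF_FUNDS: u64 = (1 + 2000) * 25;
+    const _: () = assert!(LEAF_FUNDS == 50_025);
+    // middle accounts: fund 25 leaves (plus the 180 uSTX funding fee each)
+    // and still afford their own sends.
+    const MIDDLE_FUNDS: u64 = (LEAF_FUNDS + 180) * 25 + LEAF_FUNDS;
+    const _: () = assert!(MIDDLE_FUNDS == 1_305_150);
+    // initial accounts: fund 25 middle accounts the same way.
+    const _: () = assert!((MIDDLE_FUNDS + 180) * 25 + MIDDLE_FUNDS == 33_938_400);
+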
+    // Open a sqlite DB at mempool_db_path so that we can quickly add
+    // transactions to the mempool.
+    let mut conn = Connection::open(&mempool_db_path).unwrap();
+    let db_tx = conn.transaction().unwrap();
+
+    info!("Sending the first round of funding");
+    let timer = Instant::now();
+    let mut new_senders = vec![];
+    for (sender_sk, nonce) in senders.iter_mut() {
+        for _ in 0..25 {
+            let recipient_sk = StacksPrivateKey::random();
+            let recipient_addr = tests::to_addr(&recipient_sk);
+            let sender_addr = tests::to_addr(sender_sk);
+            let transfer_tx = make_stacks_transfer(
+                sender_sk,
+                *nonce,
+                transfer_fee,
+                chain_id,
+                &recipient_addr.into(),
+                1_305_150,
+            );
+            insert_tx_in_mempool(
+                &db_tx,
+                transfer_tx,
+                &sender_addr,
+                *nonce,
+                transfer_fee,
+                &tip.consensus_hash,
+                &tip.canonical_stacks_tip_hash,
+                tip.stacks_block_height,
+            );
+            *nonce += 1;
+            new_senders.push(recipient_sk);
+        }
+    }
+    db_tx.commit().unwrap();
+
+    info!("Sending first round of funding took {:?}", timer.elapsed());
+
+    // Wait for the first round of funding to be mined
+    wait_for(120, || {
+        for (sender_sk, nonce) in senders.iter() {
+            let sender_addr = tests::to_addr(sender_sk);
+            let account = get_account(&http_origin, &sender_addr);
+            if account.nonce < *nonce {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    })
+    .expect("Timed out waiting for first round of funding to be mined");
+
+    info!(
+        "Sending and mining first round of funding took {:?}",
+        timer.elapsed()
+    );
+
+    // Add the new senders to the list of senders
+    senders.extend(new_senders.iter().map(|sk| (sk, 0)));
+
+    info!("Sending the second round of funding");
+    let db_tx = conn.transaction().unwrap();
+    let timer = Instant::now();
+    let mut new_senders = vec![];
+    for (sender_sk, nonce) in senders.iter_mut() {
+        for _ in 0..25 {
+            let sender_addr = tests::to_addr(sender_sk);
+            let recipient_sk = StacksPrivateKey::random();
+            let recipient_addr = tests::to_addr(&recipient_sk);
+            let transfer_tx = make_stacks_transfer(
+                sender_sk,
+                *nonce,
+                transfer_fee,
+                chain_id,
+                &recipient_addr.into(),
+                50_025,
+            );
+            insert_tx_in_mempool(
+                &db_tx,
+                transfer_tx,
+                &sender_addr,
+                *nonce,
+                transfer_fee,
+                &tip.consensus_hash,
+                &tip.canonical_stacks_tip_hash,
+                tip.stacks_block_height,
+            );
+            *nonce += 1;
+            new_senders.push(recipient_sk);
+        }
+    }
+    db_tx.commit().unwrap();
+
+    info!("Sending second round of funding took {:?}", timer.elapsed());
+
+    // Wait for the second round of funding to be mined
+    wait_for(120, || {
+        for (sender_sk, nonce) in senders.iter() {
+            let sender_addr = tests::to_addr(sender_sk);
+            let account = get_account(&http_origin, &sender_addr);
+            if account.nonce < *nonce {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    })
+    .expect("Timed out waiting for second round of funding to be mined");
+
+    info!(
+        "Sending and mining second round of funding took {:?}",
+        timer.elapsed()
+    );
+
+    // Add the new senders to the list of senders
+    senders.extend(new_senders.iter().map(|sk| (sk, 0)));
+
+    info!("Pause mining and fill the mempool with the transfers");
+
+    // Pause block mining
+    TEST_MINE_STALL.set(true);
+
+    let db_tx = conn.transaction().unwrap();
+    let timer = Instant::now();
+
+    // Fill the mempool with the transfers
+    for _ in 0..25 {
+        for (sender_sk, nonce) in senders.iter_mut() {
+            let sender_addr = tests::to_addr(sender_sk);
+            let fee = set_fee();
+            assert!(fee >= 180 && fee <= 2000);
+            let transfer_tx = make_stacks_transfer(sender_sk, *nonce, fee, chain_id, &recipient, 1);
+            insert_tx_in_mempool(
+                &db_tx,
+                transfer_tx,
+                &sender_addr,
+                *nonce,
+                fee,
+                &tip.consensus_hash,
+                &tip.canonical_stacks_tip_hash,
+                tip.stacks_block_height,
+            );
+            *nonce += 1;
+        }
+    }
+    db_tx.commit().unwrap();
+
+    info!("Sending transfers took {:?}", timer.elapsed());
+
+    let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len();
+    let blocks_before = test_observer::get_blocks().len();
+
+    info!("Mining transfers...");
+
+    // Unpause block mining
+    TEST_MINE_STALL.set(false);
+
+    // Wait for the first block to be proposed.
+    wait_for(30, || {
+        let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len();
+        Ok(proposed_blocks > proposed_blocks_before)
+    })
+    .expect("Timed out waiting for first block to be mined");
+
+    let blocks = test_observer::get_mined_nakamoto_blocks();
+    let last_block = blocks.last().unwrap();
+    info!(
+        "First block contains {} transactions",
+        last_block.tx_events.len()
+    );
+    if strategy == MemPoolWalkStrategy::NextNonceWithHighestFeeRate {
+        assert!(last_block.tx_events.len() > 5000);
+    }
+
+    // Wait for the first block to be accepted.
+    wait_for(20, || {
+        let blocks = test_observer::get_blocks().len();
+        Ok(blocks > blocks_before)
+    })
+    .expect("Timed out waiting for first block to be mined");
+
+    signer_test.shutdown();
+}
+
+#[test]
+#[ignore]
+fn large_mempool_original_constant_fee() {
+    large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || 180);
+}
+
+#[test]
+#[ignore]
+fn large_mempool_original_random_fee() {
+    large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || {
+        thread_rng().gen_range(180..2000)
+    });
+}
+
+#[test]
+#[ignore]
+fn large_mempool_next_constant_fee() {
+    large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || 180);
+}
+
+#[test]
+#[ignore]
+fn large_mempool_next_random_fee() {
+    large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || {
+        thread_rng().gen_range(180..2000)
+    });
+}
+
+#[test]
+#[ignore]
+/// This test intends to check the timing of the mempool iteration when there
+/// are a large number of transactions in the mempool. It will boot to epoch 3,
+/// fan out some STX transfers to a large number of accounts, wait for these to
+/// all be mined, and then pause block mining, and submit a large number of
+/// transactions to the mempool from those accounts, all with the same fee. It
+/// will then unpause block mining and wait for the first block to be mined.
+/// Since the default miner configuration specifies to spend 5 seconds mining a
+/// block, we expect that this first block should be proposed within 10 seconds
+/// and approved within 20 seconds. We also verify that the block contains at
+/// least 5,000 transactions, since a lower count than that would indicate a
+/// clear regression.
+fn larger_mempool() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let transfer_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // Start with 10 accounts with initial balances.
+    let initial_sender_sks = (0..10)
+        .map(|_| StacksPrivateKey::random())
+        .collect::<Vec<_>>();
+    let initial_sender_addrs = initial_sender_sks
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect::<Vec<_>>();
+
+    // These 10 accounts will send to 25 accounts each, then those 260 accounts
+    // will send to 25 accounts each, for a total of 6760 accounts.
+    // At the end of the funding round, we want to have 6760 accounts with
+    // enough balance to send 1 uSTX 25 times for each of 10 rounds of sends.
+    // With a fee of 180 uSTX per send, we need each account to end up with
+    // 2001 * 25 * 10 = 500_250 uSTX.
+    // The 260 accounts in the middle will need to have
+    // (500250 + 180) * 26 = 13_011_180 uSTX.
+    // The 10 initial accounts will need to have
+    // (13011180 + 180) * 26 = 338_295_360 uSTX.
+    let initial_balance = 338_295_360;
+    let initial_balances = initial_sender_addrs
+        .iter()
+        .map(|addr| (addr.clone(), initial_balance))
+        .collect::<Vec<_>>();
+
+    let num_signers = 5;
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        initial_balances,
+        |_| {},
+        |conf| {
+            conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+            conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate;
+        },
+        None,
+        None,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    signer_test.boot_to_epoch_3();
+
+    // This will hold tuples for all of our senders, with the sender key and
+    // the nonce
+    let mut senders = initial_sender_sks
+        .iter()
+        .map(|sk| (sk, 0))
+        .collect::<Vec<_>>();
+
+    let mempool_db_path = format!(
+        "{}/nakamoto-neon/chainstate/mempool.sqlite",
+        signer_test.running_nodes.conf.node.working_dir
+    );
+    let chain_id = signer_test.running_nodes.conf.burnchain.chain_id;
+    let burnchain = signer_test.running_nodes.conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+
+    // Open a sqlite DB at mempool_db_path so that we can quickly add
+    // transactions to the mempool.
+    let mut conn = Connection::open(&mempool_db_path).unwrap();
+    let db_tx = conn.transaction().unwrap();
+
+    info!("Sending the first round of funding");
+    let timer = Instant::now();
+    let mut new_senders = vec![];
+    for (sender_sk, nonce) in senders.iter_mut() {
+        for _ in 0..25 {
+            let recipient_sk = StacksPrivateKey::random();
+            let recipient_addr = tests::to_addr(&recipient_sk);
+            let sender_addr = tests::to_addr(sender_sk);
+            let transfer_tx = make_stacks_transfer(
+                sender_sk,
+                *nonce,
+                transfer_fee,
+                chain_id,
+                &recipient_addr.into(),
+                13_011_180,
+            );
+            insert_tx_in_mempool(
+                &db_tx,
+                transfer_tx,
+                &sender_addr,
+                *nonce,
+                transfer_fee,
+                &tip.consensus_hash,
+                &tip.canonical_stacks_tip_hash,
+                tip.stacks_block_height,
+            );
+            *nonce += 1;
+            new_senders.push(recipient_sk);
+        }
+    }
+    db_tx.commit().unwrap();
+
+    info!("Sending first round of funding took {:?}", timer.elapsed());
+
+    // Wait for the first round of funding to be mined
+    wait_for(120, || {
+        for (sender_sk, nonce) in senders.iter() {
+            let sender_addr = tests::to_addr(sender_sk);
+            let account = get_account(&http_origin, &sender_addr);
+            if account.nonce < *nonce {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    })
+    .expect("Timed out waiting for first round of funding to be mined");
+
+    info!(
+        "Sending and mining first round of funding took {:?}",
+        timer.elapsed()
+    );
+
+    // Add the new senders to the list of senders
+    senders.extend(new_senders.iter().map(|sk| (sk, 0)));
+
+    info!("Sending the second round of funding");
+    let db_tx = conn.transaction().unwrap();
+    let timer = Instant::now();
+    let mut new_senders = vec![];
+    for (sender_sk, nonce) in senders.iter_mut() {
+        for _ in 0..25 {
+            let sender_addr = tests::to_addr(sender_sk);
+            let recipient_sk = StacksPrivateKey::random();
+            let recipient_addr = tests::to_addr(&recipient_sk);
+            let transfer_tx = make_stacks_transfer(
+                sender_sk,
+                *nonce,
+                transfer_fee,
+                chain_id,
+                &recipient_addr.into(),
+                500_250,
+            );
+            insert_tx_in_mempool(
+                &db_tx,
+                transfer_tx,
+                &sender_addr,
+                *nonce,
+                transfer_fee,
+                &tip.consensus_hash,
+                &tip.canonical_stacks_tip_hash,
+                tip.stacks_block_height,
+            );
+            *nonce += 1;
+            new_senders.push(recipient_sk);
+        }
+    }
+    db_tx.commit().unwrap();
+
+    info!("Sending second round of funding took {:?}", timer.elapsed());
+
+    // Wait for the second round of funding to be mined
+    wait_for(120, || {
+        for (sender_sk, nonce) in senders.iter() {
+            let sender_addr = tests::to_addr(sender_sk);
+            let account = get_account(&http_origin, &sender_addr);
+            if account.nonce < *nonce {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    })
+    .expect("Timed out waiting for second round of funding to be mined");
+
+    info!(
+        "Sending and mining second round of funding took {:?}",
+        timer.elapsed()
+    );
+
+    // Add the new senders to the list of senders
+    senders.extend(new_senders.iter().map(|sk| (sk, 0)));
+
+    info!("Pause mining and fill the mempool with the transfers");
+
+    // Pause block mining
+    TEST_MINE_STALL.set(true);
+
+    let timer = Instant::now();
+
+    // Fill the mempool with the transfers
+    for _ in 0..10 {
+        let db_tx = conn.transaction().unwrap();
+        for _ in 0..25 {
+            for (sender_sk, nonce) in senders.iter_mut() {
+                let sender_addr = tests::to_addr(sender_sk);
+                let transfer_tx =
+                    make_stacks_transfer(sender_sk, *nonce, transfer_fee, chain_id, &recipient, 1);
+                insert_tx_in_mempool(
+                    &db_tx,
+                    transfer_tx,
+                    &sender_addr,
+                    *nonce,
+                    transfer_fee,
+                    &tip.consensus_hash,
+                    &tip.canonical_stacks_tip_hash,
+                    tip.stacks_block_height,
+                );
+                *nonce += 1;
+            }
+        }
+        db_tx.commit().unwrap();
+    }
+
+    info!("Sending transfers took {:?}", timer.elapsed());
+
+    let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len();
+
+    info!("Mining transfers...");
+
+    // Unpause block mining
+    TEST_MINE_STALL.set(false);
+
+    // Wait for the first block to be proposed.
+    wait_for(30, || {
+        let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len();
+        Ok(proposed_blocks > proposed_blocks_before)
+    })
+    .expect("Timed out waiting for first block to be mined");
+
+    let blocks = test_observer::get_mined_nakamoto_blocks();
+    let last_block = blocks.last().unwrap();
+    info!(
+        "First block contains {} transactions",
+        last_block.tx_events.len()
+    );
+
+    // Wait for the first round of transfers to all be mined
+    wait_for(43200, || {
+        for (sender_sk, nonce) in senders.iter() {
+            let sender_addr = tests::to_addr(sender_sk);
+            let account = get_account(&http_origin, &sender_addr);
+            if account.nonce < *nonce {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    })
+    .expect("Timed out waiting for first round of transfers to be mined");
+
+    info!("Mining first round of transfers took {:?}", timer.elapsed());
+    signer_test.shutdown();
+}
+
+#[test]
+#[ignore]
+/// This test verifies that a signer will send update messages to stackerdb when it updates its internal state
+///
+/// On a new bitcoin block arrival, the signers send a local state update message with the updated block and miner
+/// For an inactive miner, the signer sends a local state update message indicating it is reverting to the prior miner
+fn signers_send_state_message_updates() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let num_signers = 5;
+
+    // We want the miner to be marked as inactive so signers will send an update message indicating it.
+    // Therefore, set the block proposal timeout to something small enough to force a winning miner to time out.
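+    // (block_proposal_timeout is how long signers wait for a sortition winner
+    // to propose a block before treating it as inactive; tenure_extend_wait_timeout
+    // is assumed to be the miner-side wait before attempting a tenure extend.
+    // Both are set to the same short value below to keep the test fast.)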
+ let block_proposal_timeout = Duration::from_secs(20); + let tenure_extend_wait_timeout = block_proposal_timeout; + let mut miners = MultipleMinerTest::new_with_config_modifications( + num_signers, + 0, + |signer_config| { + signer_config.block_proposal_timeout = block_proposal_timeout; + }, + |config| { + config.miner.tenure_extend_wait_timeout = tenure_extend_wait_timeout; + config.miner.block_commit_delay = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + ); + + let rl1_skip_commit_op = miners + .signer_test + .running_nodes + .counters + .naka_skip_commit_op + .clone(); + let rl2_skip_commit_op = miners.rl2_counters.naka_skip_commit_op.clone(); + + let (conf_1, _) = miners.get_node_configs(); + let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes(); + let (miner_pk_1, miner_pk_2) = miners.get_miner_public_keys(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. + rl2_skip_commit_op.set(true); + + miners.boot_to_epoch_3(); + + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let get_burn_consensus_hash = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .consensus_hash + }; + let starting_peer_height = get_chain_info(&conf_1).stacks_tip_height; + let starting_burn_height = get_burn_height(); + let mut btc_blocks_mined = 0; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + rl1_skip_commit_op.set(true); + + info!("------------------------- Miner 1 Tenure Starts and Mines Block N-------------------------"); + miners + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .expect("Failed to mine BTC block followed by tenure change tx."); + btc_blocks_mined += 1; + + verify_sortition_winner(&sortdb, &miner_pkh_1); + + info!("------------------------- Confirm Miner 1 is the Active Miner in Update -------------------------"); + // Verify that signers first sent a bitcoin block update + wait_for_state_machine_update( + 60, + &get_burn_consensus_hash(), + starting_burn_height + 1, + Some((miner_pkh_1, starting_peer_height)), + ) + .expect("Timed out waiting for signers to send a state update"); + + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + test_observer::clear(); + miners.submit_commit_miner_2(&sortdb); + + // Pause the block proposal broadcast so that miner 2 will be unable to broadcast its + // tenure change proposal BEFORE the block_proposal_timeout and will be marked invalid. 
+    // Also pause miner 1's blocks so we don't go extending that tenure either
+    TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk_1, miner_pk_2]);
+
+    info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------");
+    miners
+        .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 60)
+        .expect("Timed out waiting for BTC block");
+    btc_blocks_mined += 1;
+
+    // ensure we have a successful sortition that miner 2 won
+    verify_sortition_winner(&sortdb, &miner_pkh_2);
+
+    info!("------------------------- Confirm Miner 2 is the Active Miner -------------------------{}, {}, {miner_pkh_2}", starting_burn_height + 2, starting_peer_height);
+    // We cannot confirm the height because some signers may or may not be aware of the delayed stacks block
+    wait_for_state_machine_update(
+        60,
+        &get_burn_consensus_hash(),
+        starting_burn_height + 2,
+        Some((miner_pkh_2, starting_peer_height + 1)),
+    )
+    .expect("Timed out waiting for signers to send their state update");
+
+    test_observer::clear();
+    info!(
+        "------------------------- Wait for Miner 2 to be Marked Invalid -------------------------"
+    );
+    // Make sure that miner 2 gets marked invalid by not proposing a block BEFORE block_proposal_timeout
+    std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1)));
+    // Allow miner 2 to propose its late block and see the miner get marked malicious
+    TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk_1]);
+
+    info!("------------------------- Confirm Miner 1 is the Active Miner Again -------------------------");
+    wait_for_state_machine_update(
+        60,
+        &get_burn_consensus_hash(),
+        starting_burn_height + 2,
+        Some((miner_pkh_1, starting_peer_height)),
+    )
+    .expect("Timed out waiting for signers to send their state update");
+
+    info!(
+        "------------------------- Confirm Burn and Stacks Block Heights -------------------------"
+    );
+    assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined);
+    assert_eq!(
+        miners.get_peer_stacks_tip_height(),
+        starting_peer_height + 1
+    );
+    miners.shutdown();
+}
diff --git a/versions.toml b/versions.toml
index 138c89c7173..d33360ad171 100644
--- a/versions.toml
+++ b/versions.toml
@@ -1,4 +1,4 @@
 # Update these values when a new release is created.
 # `stacks-common/build.rs` will automatically update `versions.rs` with these values.
-stacks_node_version = "3.1.0.0.7"
-stacks_signer_version = "3.1.0.0.7.0"
+stacks_node_version = "6.3.0.0.0"
+stacks_signer_version = "6.3.0.0.0.2"