diff --git a/.github/workflows/beekeeper.yml b/.github/workflows/beekeeper.yml
index ff36eff2514..c2448465d7f 100644
--- a/.github/workflows/beekeeper.yml
+++ b/.github/workflows/beekeeper.yml
@@ -8,7 +8,7 @@ on:
- "**"
env:
- K3S_VERSION: "v1.30.3+k3s1"
+ K3S_VERSION: "v1.31.10+k3s1"
REPLICA: 3
RUN_TYPE: "PR RUN"
SETUP_CONTRACT_IMAGE: "ethersphere/bee-localchain"
@@ -33,18 +33,18 @@ jobs:
msg: ${{ steps.commit.outputs.msg }}
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
if: github.event.action != 'beekeeper'
with:
fetch-depth: 0
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
if: github.event.action == 'beekeeper'
with:
fetch-depth: 0
ref: ${{ github.event.client_payload.ref }}
- name: Setup Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
cache: false
go-version-file: go.mod
@@ -158,6 +158,9 @@ jobs:
- name: Test manifest
id: manifest
run: timeout ${TIMEOUT} beekeeper check --cluster-name local-dns --checks=ci-manifest
+ - name: Test manifest v1
+ id: manifest-v1
+ run: timeout ${TIMEOUT} beekeeper check --cluster-name local-dns --checks=ci-manifest-v1
- name: Test postage stamps
id: postage-stamps
run: timeout ${TIMEOUT} beekeeper check --cluster-name local-dns --checks ci-postage
@@ -173,6 +176,9 @@ jobs:
- name: Test act
id: act
run: timeout ${TIMEOUT} bash -c 'until beekeeper check --cluster-name local-dns --checks ci-act; do echo "waiting for act..."; sleep .3; done'
+ - name: Test feeds v1
+ id: feeds-v1
+ run: timeout ${TIMEOUT} beekeeper check --cluster-name local-dns --checks=ci-feed-v1
- name: Test feeds
id: feeds
run: timeout ${TIMEOUT} beekeeper check --cluster-name local-dns --checks=ci-feed
@@ -186,11 +192,18 @@ jobs:
if ${{ steps.settlements.outcome=='failure' }}; then FAILED=settlements; fi
if ${{ steps.pss.outcome=='failure' }}; then FAILED=pss; fi
if ${{ steps.soc.outcome=='failure' }}; then FAILED=soc; fi
+ if ${{ steps.gsoc.outcome=='failure' }}; then FAILED=gsoc; fi
if ${{ steps.pushsync-chunks-1.outcome=='failure' }}; then FAILED=pushsync-chunks-1; fi
if ${{ steps.pushsync-chunks-2.outcome=='failure' }}; then FAILED=pushsync-chunks-2; fi
if ${{ steps.retrieval.outcome=='failure' }}; then FAILED=retrieval; fi
if ${{ steps.manifest.outcome=='failure' }}; then FAILED=manifest; fi
- if ${{ steps.content-availability.outcome=='failure' }}; then FAILED=content-availability; fi
+ if ${{ steps.manifest-v1.outcome=='failure' }}; then FAILED=manifest-v1; fi
+ if ${{ steps.postage-stamps.outcome=='failure' }}; then FAILED=postage-stamps; fi
+ if ${{ steps.stake.outcome=='failure' }}; then FAILED=stake; fi
+ if ${{ steps.withdraw.outcome=='failure' }}; then FAILED=withdraw; fi
+ if ${{ steps.redundancy.outcome=='failure' }}; then FAILED=redundancy; fi
+ if ${{ steps.feeds.outcome=='failure' }}; then FAILED=feeds; fi
+ if ${{ steps.feeds-v1.outcome=='failure' }}; then FAILED=feeds-v1; fi
if ${{ steps.act.outcome=='failure' }}; then FAILED=act; fi
curl -sSf -X POST -H "Content-Type: application/json" -d "{\"text\": \"**${RUN_TYPE}** Beekeeper Error\nBranch: \`${{ github.head_ref }}\`\nUser: @${{ github.event.pull_request.user.login }}\nDebugging artifacts: [click](https://$BUCKET_NAME.$AWS_ENDPOINT/artifacts_$VERTAG.tar.gz)\nStep failed: \`${FAILED}\`\"}" https://beehive.ethswarm.org/hooks/${{ secrets.TUNSHELL_KEY }}
echo "Failed test: ${FAILED}"
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index dea9ed5ce5e..38402e52cda 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -40,7 +40,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 83097774128..fb41df6f301 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
fetch-depth: '0'
- name: Check whether docs have changed
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index b705d804914..f2d6523c4f4 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -21,12 +21,21 @@ jobs:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Setup Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
cache: true
go-version-file: go.mod
+ - name: Increase UDP buffer sizes (Ubuntu)
+ if: matrix.os == 'ubuntu-latest'
+ run: |
+ sudo sysctl -w net.core.rmem_max=7500000
+ sudo sysctl -w net.core.wmem_max=7500000
+ - name: Increase UDP buffer sizes (macOS)
+ if: matrix.os == 'macos-latest'
+ run: |
+ sudo sysctl -w kern.ipc.maxsockbuf=6291456
- name: Build
run: make build
- name: Test with race detector (Ubuntu and MacOS)
@@ -41,9 +50,9 @@ jobs:
continue-on-error: ${{ github.ref == 'refs/heads/master' }}
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Setup Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
cache: true
go-version-file: go.mod
@@ -55,22 +64,24 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Setup Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
cache: false
go-version-file: go.mod
- name: Commit linting
if: github.ref != 'refs/heads/master'
uses: wagoid/commitlint-github-action@v5
+ with:
+ configFile: commitlint.config.js
- name: GolangCI-Lint
- uses: golangci/golangci-lint-action@v6
+ uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8
with:
skip-cache: false
- version: v1.64.5
+ version: v2.5.0
- name: Whitespace check
run: make check-whitespace
- name: go mod tidy check
@@ -81,9 +92,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Setup Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
cache: false
go-version-file: go.mod
@@ -97,7 +108,7 @@ jobs:
- name: Test with code coverage
run: make cover=1 test-ci
- name: Upload coverage to Codecov
- uses: codecov/codecov-action@v5
+ uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: true
@@ -109,11 +120,11 @@ jobs:
if: github.ref == 'refs/heads/master'
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Setup Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
cache: false
go-version-file: go.mod
diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml
new file mode 100644
index 00000000000..7958bfacbc4
--- /dev/null
+++ b/.github/workflows/pr-title.yml
@@ -0,0 +1,23 @@
+name: PR Title Lint
+
+on:
+ pull_request:
+ types: [opened, synchronize, reopened, edited]
+
+jobs:
+ lint-title:
+ name: Lint PR Title
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v5
+ - name: Setup Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: '20'
+ - name: Install commitlint
+ run: npm install --save-dev @commitlint/cli @commitlint/config-conventional
+ - name: PR title linting
+ env:
+ PR_TITLE: ${{ github.event.pull_request.title }}
+ run: echo "$PR_TITLE" | npx commitlint --config ./commitlint.config.js
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 8d90e7c947d..d100446245e 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -12,11 +12,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Setup Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
go-version-file: go.mod
- name: Docker Hub and Quay Login
@@ -24,9 +24,9 @@ jobs:
printf ${{ secrets.DOCKERHUB_PASSWORD }} | docker login --username ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin
printf ${{ secrets.QUAY_PASSWORD }} | docker login --username ${{ secrets.QUAY_USERNAME }} quay.io --password-stdin
- name: Set up QEMU
- uses: docker/setup-qemu-action@v3
+ uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
- name: Import GPG key
run: |
echo "$GPG_PRIVATE_KEY" | gpg --import --passphrase "$GPG_PASSPHRASE" --batch --allow-secret-key-import
@@ -37,7 +37,7 @@ jobs:
run: |
echo "BEE_API_VERSION=$(grep '^ version:' openapi/Swarm.yaml | awk '{print $2}')" >> $GITHUB_ENV
- name: Run GoReleaser
- uses: goreleaser/goreleaser-action@v6
+ uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6
with:
version: '~> v2'
args: release --clean --timeout 1h
diff --git a/.gitignore b/.gitignore
index b7b0629ab25..c94f81d64ab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
/.idea
/.vscode
/tmp
+/vendor
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
diff --git a/.golangci.yml b/.golangci.yml
index 4f07d6fa978..f6c9c2e8bed 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,5 +1,4 @@
-run:
- timeout: 10m
+version: "2"
linters:
enable:
- asciicheck
@@ -7,19 +6,14 @@ linters:
- copyloopvar
- dogsled
- durationcheck
- - errcheck
- errname
- errorlint
- forbidigo
- gochecknoinits
- goconst
- - gofmt
- goheader
- goprintffuncname
- - gosimple
- - govet
- importas
- - ineffassign
- misspell
- nilerr
- noctx
@@ -27,49 +21,62 @@ linters:
- prealloc
- predeclared
- promlinter
- - staticcheck
- thelper
- - typecheck
- unconvert
- unused
- # - depguard disable temporary until this issue is resolved: https://github.com/golangci/golangci-lint/issues/3906
-
-linters-settings:
- govet:
- enable-all: true
- disable:
- - fieldalignment ## temporally disabled
- - shadow ## temporally disabled
- goheader:
- values:
- regexp:
- date: "20[1-2][0-9]"
- template: |-
- Copyright {{date}} The Swarm Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file.
- paralleltest:
- # Ignore missing calls to `t.Parallel()` and only report incorrect uses of `t.Parallel()`.
- ignore-missing: true
-issues:
- exclude-rules:
- - linters:
- - goheader
- text: "go-ethereum Authors" ## disable check for other authors
- - path: _test\.go
- linters:
- - goconst ## temporally disable goconst in test
- - linters:
- - forbidigo
- path: cmd/bee/cmd
- text: "use of `fmt.Print" ## allow fmt.Print in cmd directory
- - linters:
- - dogsled
- path: pkg/api/(.+)_test\.go # temporally disable dogsled in api test files
- - linters:
- - dogsled
- path: pkg/pushsync/(.+)_test\.go # temporally disable dogsled in pushsync test files
- # temporally disable paralleltest in following packages
- - linters:
- - paralleltest
- path: pkg/log
+ settings:
+ goheader:
+ values:
+ regexp:
+ date: 20[1-2][0-9]
+ template: |-
+ Copyright {{date}} The Swarm Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+ govet:
+ disable:
+ - fieldalignment
+ - shadow
+ enable-all: true
+ paralleltest:
+ ignore-missing: true
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ rules:
+ - linters:
+ - goheader
+ text: go-ethereum Authors
+ - linters:
+ - goconst
+ path: _test\.go
+ - linters:
+ - forbidigo
+ path: cmd/bee/cmd
+ text: use of `fmt.Print
+ - linters:
+ - dogsled
+ path: pkg/api/(.+)_test\.go
+ - linters:
+ - dogsled
+ path: pkg/pushsync/(.+)_test\.go
+ - linters:
+ - paralleltest
+ path: pkg/log
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 8934e0fde66..78066917a98 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -293,6 +293,14 @@ brews:
email: bee-worker@ethswarm.org
homepage: https://swarm.ethereum.org/
description: Ethereum Swarm node
+ # Setting this will prevent goreleaser from actually trying to commit the updated
+ # formula - instead, the formula file will be stored on the dist directory
+ # only, leaving the responsibility of publishing it to the user.
+ # If set to auto, the release will not be uploaded to the homebrew tap
+ # in case there is an indicator for prerelease in the tag e.g. v1.0.0-rc1
+ #
+ # Templates: allowed.
+ skip_upload: auto
caveats: |
Logs: #{var}/log/swarm-bee/bee.log
Config: #{etc}/swarm-bee/bee.yaml
diff --git a/Dockerfile b/Dockerfile
index 0850b2ed8cf..26e9920166f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.24 AS build
+FROM golang:1.25 AS build
WORKDIR /src
# enable modules caching in separate layer
@@ -8,7 +8,7 @@ COPY . ./
RUN make binary
-FROM debian:12.10-slim
+FROM debian:12.12-slim
ENV DEBIAN_FRONTEND=noninteractive
diff --git a/Dockerfile.dev b/Dockerfile.dev
index a417c710cbc..60f6f175cb3 100644
--- a/Dockerfile.dev
+++ b/Dockerfile.dev
@@ -1,4 +1,4 @@
-FROM golang:1.24 AS build
+FROM golang:1.25 AS build
ARG REACHABILITY_OVERRIDE_PUBLIC=false
ARG BATCHFACTOR_OVERRIDE_PUBLIC=5
@@ -13,18 +13,24 @@ RUN make binary \
REACHABILITY_OVERRIDE_PUBLIC=${REACHABILITY_OVERRIDE_PUBLIC} \
BATCHFACTOR_OVERRIDE_PUBLIC=${BATCHFACTOR_OVERRIDE_PUBLIC}
-FROM debian:12.10-slim
+FROM debian:12.12-slim
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends \
- ca-certificates; \
+ ca-certificates \
+ iputils-ping \
+ netcat-openbsd \
+ telnet \
+ curl \
+ wget \
+ jq \
+ net-tools; \
apt-get clean; \
rm -rf /var/lib/apt/lists/*; \
groupadd -r bee --gid 999; \
useradd -r -g bee --uid 999 --no-log-init -m bee;
-# make sure mounted volumes have correct permissions
RUN mkdir -p /home/bee/.bee && chown 999:999 /home/bee/.bee
COPY --from=build /src/dist/bee /usr/local/bin/bee
diff --git a/Dockerfile.goreleaser b/Dockerfile.goreleaser
index 2a62048228e..6772cb56963 100644
--- a/Dockerfile.goreleaser
+++ b/Dockerfile.goreleaser
@@ -1,4 +1,4 @@
-FROM debian:12.10-slim
+FROM debian:12.12-slim
ENV DEBIAN_FRONTEND=noninteractive
diff --git a/Dockerfile.scratch b/Dockerfile.scratch
index e11761aa326..1ecff0f7fbc 100644
--- a/Dockerfile.scratch
+++ b/Dockerfile.scratch
@@ -1,4 +1,4 @@
-FROM debian:12.10-slim
+FROM debian:12.12-slim
ENV DEBIAN_FRONTEND=noninteractive
diff --git a/Makefile b/Makefile
index 01568f1136b..504f87cb190 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
GO ?= go
GOBIN ?= $$($(GO) env GOPATH)/bin
GOLANGCI_LINT ?= $(GOBIN)/golangci-lint
-GOLANGCI_LINT_VERSION ?= v1.64.5
+GOLANGCI_LINT_VERSION ?= v2.5.0
GOGOPROTOBUF ?= protoc-gen-gogofaster
GOGOPROTOBUF_VERSION ?= v1.3.1
BEEKEEPER_INSTALL_DIR ?= $(GOBIN)
@@ -19,11 +19,9 @@ BEE_API_VERSION ?= "$(shell grep '^ version:' openapi/Swarm.yaml | awk '{print
VERSION ?= "$(shell git describe --tags --abbrev=0 | cut -c2-)"
COMMIT_HASH ?= "$(shell git describe --long --dirty --always --match "" || true)"
CLEAN_COMMIT ?= "$(shell git describe --long --always --match "" || true)"
-COMMIT_TIME ?= "$(shell git show -s --format=%ct $(CLEAN_COMMIT) || true)"
LDFLAGS ?= -s -w \
-X github.com/ethersphere/bee/v2.version="$(VERSION)" \
-X github.com/ethersphere/bee/v2.commitHash="$(COMMIT_HASH)" \
--X github.com/ethersphere/bee/v2.commitTime="$(COMMIT_TIME)" \
-X github.com/ethersphere/bee/v2/pkg/api.Version="$(BEE_API_VERSION)" \
-X github.com/ethersphere/bee/v2/pkg/p2p/libp2p.reachabilityOverridePublic="$(REACHABILITY_OVERRIDE_PUBLIC)" \
-X github.com/ethersphere/bee/v2/pkg/postage/listener.batchFactorOverridePublic="$(BATCHFACTOR_OVERRIDE_PUBLIC)"
@@ -172,4 +170,4 @@ clean:
$(GO) clean
rm -rf dist/
-FORCE:
\ No newline at end of file
+FORCE:
diff --git a/README.md b/README.md
index ad6caf49529..34d71449e8c 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@
[](https://docs.ethswarm.org/api/)


-
+[](https://github.com/ethersphere/bee/blob/master/LICENSE)
## DISCLAIMER
@@ -34,7 +34,7 @@ Please read the [coding guidelines](CODING.md) and [style guide](CODINGSTYLE.md)
## Installing
-[Install instructions](https://docs.ethswarm.org/docs/installation/quick-start)
+[Install instructions](https://docs.ethswarm.org/docs/bee/installation/quick-start)
## Get in touch
diff --git a/cmd/bee/cmd/cmd.go b/cmd/bee/cmd/cmd.go
index e10e015ce81..98a6323be31 100644
--- a/cmd/bee/cmd/cmd.go
+++ b/cmd/bee/cmd/cmd.go
@@ -17,6 +17,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/node"
"github.com/ethersphere/bee/v2/pkg/swarm"
+ p2pforge "github.com/ipshipyard/p2p-forge/client"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -71,7 +72,6 @@ const (
optionNameStaticNodes = "static-nodes"
optionNameAllowPrivateCIDRs = "allow-private-cidrs"
optionNameSleepAfter = "sleep-after"
- optionNameUsePostageSnapshot = "use-postage-snapshot"
optionNameStorageIncentivesEnable = "storage-incentives-enable"
optionNameStateStoreCacheCapacity = "statestore-cache-capacity"
optionNameTargetNeighborhood = "target-neighborhood"
@@ -81,6 +81,13 @@ const (
optionMinimumStorageRadius = "minimum-storage-radius"
optionReserveCapacityDoubling = "reserve-capacity-doubling"
optionSkipPostageSnapshot = "skip-postage-snapshot"
+ optionNameMinimumGasTipCap = "minimum-gas-tip-cap"
+ optionNameP2PWSSEnable = "p2p-wss-enable"
+ optionP2PWSSAddr = "p2p-wss-addr"
+ optionNATWSSAddr = "nat-wss-addr"
+ optionAutoTLSDomain = "autotls-domain"
+ optionAutoTLSRegistrationEndpoint = "autotls-registration-endpoint"
+ optionAutoTLSCAEndpoint = "autotls-ca-endpoint"
)
// nolint:gochecknoinits
@@ -280,7 +287,6 @@ func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().Bool(optionNamePProfMutex, false, "enable pprof mutex profile")
cmd.Flags().StringSlice(optionNameStaticNodes, []string{}, "protect nodes from getting kicked out on bootnode")
cmd.Flags().Bool(optionNameAllowPrivateCIDRs, false, "allow to advertise private CIDRs to the public network")
- cmd.Flags().Bool(optionNameUsePostageSnapshot, false, "bootstrap node using postage snapshot from the network")
cmd.Flags().Bool(optionNameStorageIncentivesEnable, true, "enable storage incentives feature")
cmd.Flags().Uint64(optionNameStateStoreCacheCapacity, 100_000, "lru memory caching capacity in number of statestore entries")
cmd.Flags().String(optionNameTargetNeighborhood, "", "neighborhood to target in binary format (ex: 111111001) for mining the initial overlay")
@@ -290,6 +296,13 @@ func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().Uint(optionMinimumStorageRadius, 0, "minimum radius storage threshold")
cmd.Flags().Int(optionReserveCapacityDoubling, 0, "reserve capacity doubling")
cmd.Flags().Bool(optionSkipPostageSnapshot, false, "skip postage snapshot")
+ cmd.Flags().Uint64(optionNameMinimumGasTipCap, 0, "minimum gas tip cap in wei for transactions, 0 means use suggested gas tip cap")
+ cmd.Flags().Bool(optionNameP2PWSSEnable, false, "Enable Secure WebSocket P2P connections")
+ cmd.Flags().String(optionP2PWSSAddr, ":1635", "p2p wss address")
+ cmd.Flags().String(optionNATWSSAddr, "", "WSS NAT exposed address")
+ cmd.Flags().String(optionAutoTLSDomain, p2pforge.DefaultForgeDomain, "autotls domain")
+ cmd.Flags().String(optionAutoTLSRegistrationEndpoint, p2pforge.DefaultForgeEndpoint, "autotls registration endpoint")
+ cmd.Flags().String(optionAutoTLSCAEndpoint, p2pforge.DefaultCAEndpoint, "autotls certificate authority endpoint")
}
func newLogger(cmd *cobra.Command, verbosity string) (log.Logger, error) {
diff --git a/cmd/bee/cmd/db.go b/cmd/bee/cmd/db.go
index 62eee2591c2..8bca2ed31cc 100644
--- a/cmd/bee/cmd/db.go
+++ b/cmd/bee/cmd/db.go
@@ -31,7 +31,6 @@ import (
const (
optionNameValidation = "validate"
- optionNameValidationPin = "validate-pin"
optionNameCollectionPin = "pin"
optionNameOutputLocation = "output"
)
diff --git a/cmd/bee/cmd/db_test.go b/cmd/bee/cmd/db_test.go
index f52da4bd0d2..7d32b5c3080 100644
--- a/cmd/bee/cmd/db_test.go
+++ b/cmd/bee/cmd/db_test.go
@@ -39,7 +39,7 @@ func TestDBExportImport(t *testing.T) {
chunks := make(map[string]int)
nChunks := 10
- for i := 0; i < nChunks; i++ {
+ for range nChunks {
ch := storagetest.GenerateTestRandomChunk()
err := db1.ReservePutter().Put(ctx, ch)
if err != nil {
@@ -101,13 +101,13 @@ func TestDBExportImportPinning(t *testing.T) {
pins := make(map[string]any)
nChunks := 10
- for i := 0; i < 2; i++ {
+ for range 2 {
rootAddr := swarm.RandAddress(t)
collection, err := db1.NewCollection(ctx)
if err != nil {
t.Fatal(err)
}
- for j := 0; j < nChunks; j++ {
+ for range nChunks {
ch := storagetest.GenerateTestRandomChunk()
err = collection.Put(ctx, ch)
if err != nil {
@@ -186,7 +186,7 @@ func TestDBNuke_FLAKY(t *testing.T) {
}, dataDir)
nChunks := 10
- for i := 0; i < nChunks; i++ {
+ for range nChunks {
ch := storagetest.GenerateTestRandomChunk()
err := db.ReservePutter().Put(ctx, ch)
if err != nil {
@@ -241,7 +241,7 @@ func TestDBInfo(t *testing.T) {
}, dir1)
nChunks := 10
- for i := 0; i < nChunks; i++ {
+ for range nChunks {
ch := storagetest.GenerateTestRandomChunk()
err := db1.ReservePutter().Put(ctx, ch)
if err != nil {
diff --git a/cmd/bee/cmd/deploy.go b/cmd/bee/cmd/deploy.go
index 89880273dab..c0e20cb63e7 100644
--- a/cmd/bee/cmd/deploy.go
+++ b/cmd/bee/cmd/deploy.go
@@ -59,6 +59,7 @@ func (c *command) initDeployCmd() error {
signer,
blocktime,
true,
+ c.config.GetUint64(optionNameMinimumGasTipCap),
)
if err != nil {
return err
diff --git a/cmd/bee/cmd/split.go b/cmd/bee/cmd/split.go
index ebe798e878d..112b32ed2fa 100644
--- a/cmd/bee/cmd/split.go
+++ b/cmd/bee/cmd/split.go
@@ -116,13 +116,13 @@ func splitRefs(cmd *cobra.Command) {
}
logger.Debug("write root", "hash", rootRef)
- _, err = writer.WriteString(fmt.Sprintf("%s\n", rootRef))
+ _, err = fmt.Fprintf(writer, "%s\n", rootRef)
if err != nil {
return fmt.Errorf("write root hash: %w", err)
}
for _, ref := range refs {
logger.Debug("write chunk", "hash", ref)
- _, err = writer.WriteString(fmt.Sprintf("%s\n", ref))
+ _, err = fmt.Fprintf(writer, "%s\n", ref)
if err != nil {
return fmt.Errorf("write chunk address: %w", err)
}
diff --git a/cmd/bee/cmd/start.go b/cmd/bee/cmd/start.go
index dab68c447fb..2268e8549c1 100644
--- a/cmd/bee/cmd/start.go
+++ b/cmd/bee/cmd/start.go
@@ -68,11 +68,8 @@ func (c *command) initStartCmd() (err error) {
}
fmt.Print(beeWelcomeMessage)
- fmt.Printf("\n\nversion: %v - planned to be supported until %v, please follow https://ethswarm.org/\n\n", bee.Version, endSupportDate())
logger.Info("bee version", "version", bee.Version)
- go startTimeBomb(logger)
-
// ctx is global context of bee node; which is canceled after interrupt signal is received.
ctx, cancel := context.WithCancel(context.Background())
sysInterruptChannel := make(chan os.Signal, 1)
@@ -281,12 +278,16 @@ func buildBeeNode(ctx context.Context, c *command, cmd *cobra.Command, logger lo
Addr: c.config.GetString(optionNameP2PAddr),
AllowPrivateCIDRs: c.config.GetBool(optionNameAllowPrivateCIDRs),
APIAddr: c.config.GetString(optionNameAPIAddr),
+ EnableWSS: c.config.GetBool(optionNameP2PWSSEnable),
+ WSSAddr: c.config.GetString(optionP2PWSSAddr),
+ AutoTLSStorageDir: filepath.Join(c.config.GetString(optionNameDataDir), "autotls"),
BlockchainRpcEndpoint: c.config.GetString(optionNameBlockchainRpcEndpoint),
BlockProfile: c.config.GetBool(optionNamePProfBlock),
BlockTime: networkConfig.blockTime,
BootnodeMode: bootNode,
Bootnodes: networkConfig.bootNodes,
CacheCapacity: c.config.GetUint64(optionNameCacheCapacity),
+ AutoTLSCAEndpoint: c.config.GetString(optionAutoTLSCAEndpoint),
ChainID: networkConfig.chainID,
ChequebookEnable: c.config.GetBool(optionNameChequebookEnable),
CORSAllowedOrigins: c.config.GetStringSlice(optionCORSAllowedOrigins),
@@ -297,11 +298,15 @@ func buildBeeNode(ctx context.Context, c *command, cmd *cobra.Command, logger lo
DBWriteBufferSize: c.config.GetUint64(optionNameDBWriteBufferSize),
EnableStorageIncentives: c.config.GetBool(optionNameStorageIncentivesEnable),
EnableWS: c.config.GetBool(optionNameP2PWSEnable),
+ AutoTLSDomain: c.config.GetString(optionAutoTLSDomain),
+ AutoTLSRegistrationEndpoint: c.config.GetString(optionAutoTLSRegistrationEndpoint),
FullNodeMode: fullNode,
Logger: logger,
+ MinimumGasTipCap: c.config.GetUint64(optionNameMinimumGasTipCap),
MinimumStorageRadius: c.config.GetUint(optionMinimumStorageRadius),
MutexProfile: c.config.GetBool(optionNamePProfMutex),
NATAddr: c.config.GetString(optionNameNATAddr),
+ NATWSSAddr: c.config.GetString(optionNATWSSAddr),
NeighborhoodSuggester: neighborhoodSuggester,
PaymentEarly: c.config.GetInt64(optionNamePaymentEarly),
PaymentThreshold: c.config.GetString(optionNamePaymentThreshold),
@@ -326,7 +331,6 @@ func buildBeeNode(ctx context.Context, c *command, cmd *cobra.Command, logger lo
TracingEndpoint: tracingEndpoint,
TracingServiceName: c.config.GetString(optionNameTracingServiceName),
TrxDebugMode: c.config.GetBool(optionNameTransactionDebugMode),
- UsePostageSnapshot: c.config.GetBool(optionNameUsePostageSnapshot),
WarmupTime: c.config.GetDuration(optionWarmUpTime),
WelcomeMessage: c.config.GetString(optionWelcomeMessage),
WhitelistedWithdrawalAddress: c.config.GetStringSlice(optionNameWhitelistedWithdrawalAddress),
diff --git a/cmd/bee/cmd/start_dev.go b/cmd/bee/cmd/start_dev.go
index 4440b3fe441..9c82d11be60 100644
--- a/cmd/bee/cmd/start_dev.go
+++ b/cmd/bee/cmd/start_dev.go
@@ -14,13 +14,15 @@ import (
"github.com/ethersphere/bee/v2/pkg/node"
"github.com/kardianos/service"
"github.com/spf13/cobra"
+ "golang.org/x/term"
)
func (c *command) initStartDevCmd() (err error) {
cmd := &cobra.Command{
Use: "dev",
- Short: "Start a Swarm node in development mode",
+ Short: "Start in dev mode. WARNING: This command will be deprecated soon.",
+ Long: "WARNING: This command will be deprecated soon. For more information, please refer to the official documentation: https://docs.ethswarm.org/docs/develop/tools-and-features/bee-dev-mode and check back regularly for updates.",
PersistentPreRunE: c.CheckUnknownParams,
RunE: func(cmd *cobra.Command, args []string) (err error) {
if len(args) > 0 {
@@ -54,6 +56,26 @@ func (c *command) initStartDevCmd() (err error) {
fmt.Println(beeASCII)
fmt.Println()
+
+ warningBox := `
+╔══════════════════════════════════════════════════════════════════════════════╗
+║ ║
+║ WARNING: This command will be deprecated soon. ║
+║ ║
+║ For more information, please refer to the official documentation: ║
+║ https://docs.ethswarm.org/docs/develop/tools-and-features/bee-dev-mode ║
+║ ║
+║ Please check back regularly for updates. ║
+║ ║
+╚══════════════════════════════════════════════════════════════════════════════╝
+`
+ if term.IsTerminal(int(os.Stdout.Fd())) {
+ fmt.Print("\u001b[33m" + warningBox + "\u001b[0m")
+ } else {
+ fmt.Print(warningBox)
+ }
+ fmt.Println()
+
fmt.Println("Starting in development mode")
fmt.Println()
diff --git a/cmd/bee/cmd/timebomb.go b/cmd/bee/cmd/timebomb.go
deleted file mode 100644
index f53f74627c3..00000000000
--- a/cmd/bee/cmd/timebomb.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "strconv"
- "time"
-
- "github.com/ethersphere/bee/v2"
- "github.com/ethersphere/bee/v2/pkg/log"
-)
-
-const (
- limitDays = 90
- warningDays = 0.9 * limitDays // show warning once 90% of the time bomb time has passed
- sleepFor = 30 * time.Minute
-)
-
-var (
- commitTime, _ = strconv.ParseInt(bee.CommitTime(), 10, 64)
- versionReleased = time.Unix(commitTime, 0)
-)
-
-func startTimeBomb(logger log.Logger) {
- for {
- outdated := time.Now().AddDate(0, 0, -limitDays)
-
- if versionReleased.Before(outdated) {
- logger.Warning("your node is outdated, please check for the latest version")
- } else {
- almostOutdated := time.Now().AddDate(0, 0, -warningDays)
-
- if versionReleased.Before(almostOutdated) {
- logger.Warning("your node is almost outdated, please check for the latest version")
- }
- }
-
- <-time.After(sleepFor)
- }
-}
-
-func endSupportDate() string {
- return versionReleased.AddDate(0, 0, limitDays).Format("2 January 2006")
-}
diff --git a/go.mod b/go.mod
index dfd5fbd4a5e..cf602914109 100644
--- a/go.mod
+++ b/go.mod
@@ -1,56 +1,58 @@
module github.com/ethersphere/bee/v2
-go 1.24.0
+go 1.25
-toolchain go1.24.2
+toolchain go1.25.2
require (
contrib.go.opencensus.io/exporter/prometheus v0.4.2
github.com/armon/go-radix v1.0.0
github.com/btcsuite/btcd/btcec/v2 v2.3.2
+ github.com/caddyserver/certmagic v0.21.6
github.com/coreos/go-semver v0.3.0
github.com/ethereum/go-ethereum v1.15.11
- github.com/ethersphere/batch-archive v0.0.4
+ github.com/ethersphere/batch-archive v0.0.5
github.com/ethersphere/go-price-oracle-abi v0.6.9
github.com/ethersphere/go-storage-incentives-abi v0.9.4
github.com/ethersphere/go-sw3-abi v0.6.9
github.com/ethersphere/langos v1.0.0
- github.com/go-playground/validator/v10 v10.11.1
+ github.com/go-playground/validator/v10 v10.19.0
github.com/gogo/protobuf v1.3.2
- github.com/google/go-cmp v0.6.0
- github.com/google/uuid v1.4.0
+ github.com/google/go-cmp v0.7.0
+ github.com/google/uuid v1.6.0
github.com/gorilla/handlers v1.4.2
- github.com/gorilla/mux v1.8.0
- github.com/gorilla/websocket v1.5.1
+ github.com/gorilla/mux v1.8.1
+ github.com/gorilla/websocket v1.5.3
github.com/hashicorp/go-multierror v1.1.1
- github.com/hashicorp/golang-lru/v2 v2.0.5
- github.com/ipfs/go-cid v0.4.1
+ github.com/hashicorp/golang-lru/v2 v2.0.7
+ github.com/ipfs/go-cid v0.5.0
+ github.com/ipshipyard/p2p-forge v0.7.0
github.com/kardianos/service v1.2.2
github.com/klauspost/reedsolomon v1.11.8
- github.com/libp2p/go-libp2p v0.33.2
- github.com/multiformats/go-multiaddr v0.12.3
- github.com/multiformats/go-multiaddr-dns v0.3.1
+ github.com/libp2p/go-libp2p v0.46.0
+ github.com/multiformats/go-multiaddr v0.16.1
+ github.com/multiformats/go-multiaddr-dns v0.4.1
github.com/multiformats/go-multihash v0.2.3
- github.com/multiformats/go-multistream v0.5.0
+ github.com/multiformats/go-multistream v0.6.1
github.com/opentracing/opentracing-go v1.2.0
- github.com/prometheus/client_golang v1.21.1
+ github.com/prometheus/client_golang v1.22.0
github.com/spf13/afero v1.6.0
github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.7.0
- github.com/stretchr/testify v1.10.0
+ github.com/stretchr/testify v1.11.1
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/uber/jaeger-client-go v2.24.0+incompatible
- github.com/vmihailenco/msgpack/v5 v5.3.4
+ github.com/vmihailenco/msgpack/v5 v5.4.1
github.com/wealdtech/go-ens/v3 v3.5.1
gitlab.com/nolash/go-mockbytes v0.0.7
go.uber.org/atomic v1.11.0
go.uber.org/goleak v1.3.0
- golang.org/x/crypto v0.36.0
- golang.org/x/net v0.38.0
- golang.org/x/sync v0.12.0
- golang.org/x/sys v0.31.0
- golang.org/x/term v0.30.0
- golang.org/x/time v0.9.0
+ golang.org/x/crypto v0.45.0
+ golang.org/x/net v0.47.0
+ golang.org/x/sync v0.18.0
+ golang.org/x/sys v0.38.0
+ golang.org/x/term v0.37.0
+ golang.org/x/time v0.12.0
gopkg.in/yaml.v2 v2.4.0
resenje.org/feed v0.1.2
resenje.org/multex v0.1.0
@@ -59,102 +61,110 @@ require (
)
require (
- github.com/BurntSushi/toml v1.1.0 // indirect
+ github.com/BurntSushi/toml v1.3.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
+ github.com/caddyserver/zerossl v0.1.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/codahale/hdrhistogram v0.0.0-00010101000000-000000000000 // indirect
- github.com/consensys/bavard v0.1.27 // indirect
- github.com/consensys/gnark-crypto v0.16.0 // indirect
- github.com/containerd/cgroups v1.1.0 // indirect
- github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/consensys/gnark-crypto v0.18.1 // indirect
github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/deckarep/golang-set/v2 v2.6.0 // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
- github.com/docker/go-units v0.5.0 // indirect
- github.com/elastic/gosigar v0.14.2 // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.0 // indirect
github.com/ethereum/go-verkle v0.2.2 // indirect
github.com/felixge/fgprof v0.9.5
github.com/flynn/noise v1.1.0 // indirect
- github.com/francoispqt/gojay v1.2.13 // indirect
- github.com/fsnotify/fsnotify v1.6.0 // indirect
+ github.com/fsnotify/fsnotify v1.9.0 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/go-kit/log v0.2.1 // indirect
- github.com/go-logfmt/logfmt v0.5.1 // indirect
+ github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
- github.com/go-playground/locales v0.14.0 // indirect
- github.com/go-playground/universal-translator v0.18.0 // indirect
- github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
- github.com/godbus/dbus/v5 v5.1.0 // indirect
+ github.com/go-playground/locales v0.14.1 // indirect
+ github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
- github.com/google/gopacket v1.1.19 // indirect
- github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
- github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect
- github.com/hashicorp/errwrap v1.0.0 // indirect
+ github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/ipfs/go-log/v2 v2.5.1 // indirect
+ github.com/ipfs/go-log/v2 v2.6.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
- github.com/klauspost/compress v1.17.11 // indirect
- github.com/klauspost/cpuid/v2 v2.2.7 // indirect
- github.com/koron/go-ssdp v0.0.4 // indirect
- github.com/leodido/go-urn v1.2.1 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+ github.com/koron/go-ssdp v0.0.6 // indirect
+ github.com/leodido/go-urn v1.4.0 // indirect
+ github.com/libdns/libdns v0.2.2 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
- github.com/libp2p/go-flow-metrics v0.1.0 // indirect
+ github.com/libp2p/go-flow-metrics v0.2.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
- github.com/libp2p/go-nat v0.2.0 // indirect
- github.com/libp2p/go-netroute v0.2.1 // indirect
+ github.com/libp2p/go-netroute v0.3.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
- github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
+ github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/magiconair/properties v1.8.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/miekg/dns v1.1.58 // indirect
+ github.com/mholt/acmez/v3 v3.0.0 // indirect
+ github.com/miekg/dns v1.1.66 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
- github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
- github.com/multiformats/go-multicodec v0.9.0 // indirect
- github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/multiformats/go-multicodec v0.9.1 // indirect
+ github.com/multiformats/go-varint v0.0.7
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/onsi/ginkgo/v2 v2.15.0 // indirect
- github.com/opencontainers/runtime-spec v1.2.0 // indirect
+ github.com/onsi/gomega v1.36.3 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pelletier/go-toml v1.8.0 // indirect
+ github.com/pion/datachannel v1.5.10 // indirect
+ github.com/pion/dtls/v2 v2.2.12 // indirect
+ github.com/pion/dtls/v3 v3.0.6 // indirect
+ github.com/pion/ice/v4 v4.0.10 // indirect
+ github.com/pion/interceptor v0.1.40 // indirect
+ github.com/pion/logging v0.2.3 // indirect
+ github.com/pion/mdns/v2 v2.0.7 // indirect
+ github.com/pion/randutil v0.1.0 // indirect
+ github.com/pion/rtcp v1.2.15 // indirect
+ github.com/pion/rtp v1.8.19 // indirect
+ github.com/pion/sctp v1.8.39 // indirect
+ github.com/pion/sdp/v3 v3.0.13 // indirect
+ github.com/pion/srtp/v3 v3.0.6 // indirect
+ github.com/pion/stun v0.6.1 // indirect
+ github.com/pion/stun/v3 v3.0.0 // indirect
+ github.com/pion/transport/v2 v2.2.10 // indirect
+ github.com/pion/transport/v3 v3.0.7 // indirect
+ github.com/pion/turn/v4 v4.0.2 // indirect
+ github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.62.0
- github.com/prometheus/procfs v0.15.1 // indirect
- github.com/prometheus/statsd_exporter v0.22.7 // indirect
- github.com/quic-go/qpack v0.4.0 // indirect
- github.com/quic-go/quic-go v0.42.0 // indirect
- github.com/quic-go/webtransport-go v0.6.0 // indirect
- github.com/raulk/go-watchdog v1.3.0 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.64.0
+ github.com/prometheus/procfs v0.16.1 // indirect
+ github.com/prometheus/statsd_exporter v0.26.1 // indirect
+ github.com/quic-go/qpack v0.6.0 // indirect
+ github.com/quic-go/quic-go v0.57.1 // indirect
+ github.com/quic-go/webtransport-go v0.9.0 // indirect
+ github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/shirou/gopsutil v3.21.5+incompatible // indirect
- github.com/smartystreets/assertions v1.1.1 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/cast v1.3.0 // indirect
github.com/spf13/jwalterweatherman v1.0.0 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/pflag v1.0.6 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/supranational/blst v0.3.14 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
@@ -162,21 +172,24 @@ require (
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/wealdtech/go-multicodec v1.4.0 // indirect
+ github.com/wlynxg/anet v0.0.5 // indirect
+ github.com/zeebo/blake3 v0.2.4 // indirect
go.opencensus.io v0.24.0 // indirect
- go.uber.org/dig v1.17.1 // indirect
- go.uber.org/fx v1.20.1 // indirect
- go.uber.org/mock v0.4.0 // indirect
+ go.uber.org/dig v1.19.0 // indirect
+ go.uber.org/fx v1.24.0 // indirect
+ go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.27.0 // indirect
- golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
- golang.org/x/mod v0.22.0 // indirect
- golang.org/x/text v0.23.0 // indirect
- golang.org/x/tools v0.29.0 // indirect
- google.golang.org/protobuf v1.36.1 // indirect
- gopkg.in/ini.v1 v1.57.0 // indirect
+ go.uber.org/zap v1.27.0
+ go.uber.org/zap/exp v0.3.0 // indirect
+ golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
+ golang.org/x/mod v0.29.0 // indirect
+ golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect
+ golang.org/x/text v0.31.0 // indirect
+ golang.org/x/tools v0.38.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- lukechampine.com/blake3 v1.2.1 // indirect
- rsc.io/tmplfunc v0.0.3 // indirect
+ lukechampine.com/blake3 v1.4.1 // indirect
)
replace github.com/codahale/hdrhistogram => github.com/HdrHistogram/hdrhistogram-go v0.0.0-20200919145931-8dac23c8dac1
diff --git a/go.sum b/go.sum
index b07fdc93fc9..fab9e1c2f69 100644
--- a/go.sum
+++ b/go.sum
@@ -59,8 +59,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
-github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
@@ -105,8 +105,6 @@ github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7
github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0=
github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM=
github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -138,6 +136,10 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
+github.com/caddyserver/certmagic v0.21.6 h1:1th6GfprVfsAtFNOu4StNMF5IxK5XiaI0yZhAHlZFPE=
+github.com/caddyserver/certmagic v0.21.6/go.mod h1:n1sCo7zV1Ez2j+89wrzDxo4N/T1Ws/Vx8u5NvuBFabw=
+github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA=
+github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
@@ -158,7 +160,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@@ -175,26 +176,17 @@ github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
-github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs=
-github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
-github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
-github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
-github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
-github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDdoGqFt3fI=
+github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -208,18 +200,19 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
-github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
-github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
+github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
+github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
@@ -228,16 +221,10 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
-github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
-github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
-github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -249,8 +236,8 @@ github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/d
github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI=
github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
-github.com/ethersphere/batch-archive v0.0.4 h1:PHmwQfmUEyDJgoX2IqI/R0alQ63+aLPXfdNNSmNLGwI=
-github.com/ethersphere/batch-archive v0.0.4/go.mod h1:41BPb192NoK9CYjNB8BAE1J2MtiI/5aq0Wtas5O7A7Q=
+github.com/ethersphere/batch-archive v0.0.5 h1:SM3g7Tuge4KhOn+NKgPcg2Uz2p8a/MLKZvZpmkKCyU4=
+github.com/ethersphere/batch-archive v0.0.5/go.mod h1:41BPb192NoK9CYjNB8BAE1J2MtiI/5aq0Wtas5O7A7Q=
github.com/ethersphere/go-price-oracle-abi v0.6.9 h1:bseen6he3PZv5GHOm+KD6s4awaFmVSD9LFx+HpB6rCU=
github.com/ethersphere/go-price-oracle-abi v0.6.9/go.mod h1:sI/Qj4/zJ23/b1enzwMMv0/hLTpPNVNacEwCWjo6yBk=
github.com/ethersphere/go-storage-incentives-abi v0.9.4 h1:mSIWXQXg5OQmH10QvXMV5w0vbSibFMaRlBL37gPLTM0=
@@ -269,12 +256,13 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
-github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
+github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays=
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
@@ -297,34 +285,27 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
+github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
-github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
-github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
-github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
-github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
-github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
-github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4=
+github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
-github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -368,8 +349,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -386,21 +365,18 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
-github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -410,29 +386,27 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
+github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
-github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
-github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
@@ -443,8 +417,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@@ -462,8 +437,8 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
-github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
@@ -502,10 +477,12 @@ github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bS
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
-github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
-github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
-github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
-github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
+github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
+github.com/ipshipyard/p2p-forge v0.7.0 h1:PQayexxZC1FR2Vx0XOSbmZ6wDPliidS48I+xXWuF+YU=
+github.com/ipshipyard/p2p-forge v0.7.0/go.mod h1:i2wg0p7WmHGyo5vYaK9COZBp8BN5Drncfu3WoQNZlQY=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
@@ -529,7 +506,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
@@ -546,26 +522,25 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
-github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY=
github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
-github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
+github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
+github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -579,15 +554,17 @@ github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
-github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
-github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
+github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
-github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
-github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
-github.com/libp2p/go-libp2p v0.33.2 h1:vCdwnFxoGOXMKmaGHlDSnL4bM3fQeW8pgIa9DECnb40=
-github.com/libp2p/go-libp2p v0.33.2/go.mod h1:zTeppLuCvUIkT118pFVzA8xzP/p2dJYOMApCkFh0Yww=
+github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
+github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
+github.com/libp2p/go-libp2p v0.46.0 h1:0T2yvIKpZ3DVYCuPOFxPD1layhRU486pj9rSlGWYnDM=
+github.com/libp2p/go-libp2p v0.46.0/go.mod h1:TbIDnpDjBLa7isdgYpbxozIVPBTmM/7qKOJP4SFySrQ=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
@@ -595,20 +572,20 @@ github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm
github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
-github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
-github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
-github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
-github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
+github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc=
+github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
-github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
-github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
+github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
+github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/lucas-clemente/quic-go v0.15.2/go.mod h1:qxmO5Y4ZMhdNkunGfxuZnZXnJwYpW9vjQkyrZ7BsgUI=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
+github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI=
github.com/marten-seemann/qtls v0.8.0/go.mod h1:Lao6jDqlCfxyLKYFmZXGm2LSHBgVn+P+ROOex6YkT+k=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
@@ -626,7 +603,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
@@ -636,11 +612,12 @@ github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mholt/acmez/v3 v3.0.0 h1:r1NcjuWR0VaKP2BTjDK9LRFBw/WvURx3jlaEUl9Ht8E=
+github.com/mholt/acmez/v3 v3.0.0/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
-github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
+github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
+github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@@ -663,9 +640,6 @@ github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxd
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
-github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
-github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
-github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -684,29 +658,27 @@ github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoR
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
-github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y=
github.com/multiformats/go-multiaddr v0.3.2/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0=
-github.com/multiformats/go-multiaddr v0.12.3 h1:hVBXvPRcKG0w80VinQ23P5t7czWgg65BmIvQKjDydU8=
-github.com/multiformats/go-multiaddr v0.12.3/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
-github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
-github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
+github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw=
+github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
+github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
+github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
-github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
-github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
+github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
+github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
-github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
-github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
-github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
+github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
@@ -728,20 +700,16 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
-github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
-github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
-github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
+github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -762,26 +730,59 @@ github.com/peterh/liner v1.2.1 h1:O4BlKaq/LWu6VRWmol4ByWfzx6MfXc5Op5HETyIy5yg=
github.com/peterh/liner v1.2.1/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pion/dtls/v2 v2.2.8 h1:BUroldfiIbV9jSnC6cKOMnyiORRWrWWpV11JUyEu5OA=
-github.com/pion/dtls/v2 v2.2.8/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
-github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
+github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
+github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
+github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
+github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
+github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
+github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
+github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
+github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
+github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
+github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
+github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
+github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
+github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
+github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
+github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
+github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
+github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
+github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
+github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
+github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
+github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
+github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
+github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
+github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
+github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
+github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
+github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0=
github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ=
-github.com/pion/transport/v2 v2.2.4 h1:41JJK6DZQYSeVLxILA2+F4ZkKb4Xd/tFJZRFZQ9QAlo=
+github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
+github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
+github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
-github.com/pion/transport/v3 v3.0.1 h1:gDTlPJwROfSfz6QfSi0ZmeCSkFcnWWiiR9ES0ouANiM=
-github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
+github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
+github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
+github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
+github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
+github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
+github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
+github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -793,14 +794,14 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
-github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -812,8 +813,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
+github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -824,20 +825,19 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
+github.com/prometheus/statsd_exporter v0.26.1 h1:ucbIAdPmwAUcA+dU+Opok8Qt81Aw8HanlO+2N/Wjv7w=
+github.com/prometheus/statsd_exporter v0.26.1/go.mod h1:XlDdjAmRmx3JVvPPYuFNUg+Ynyb5kR69iPPkQjxXFMk=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
-github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
-github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
-github.com/quic-go/quic-go v0.42.0 h1:uSfdap0eveIl8KXnipv9K7nlwZ5IqLlYOpJ58u5utpM=
-github.com/quic-go/quic-go v0.42.0/go.mod h1:132kz4kL3F9vxhW3CtQJLDVwcFe5wdWeJXXijhsO57M=
-github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY=
-github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc=
-github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
-github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
+github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
+github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
+github.com/quic-go/quic-go v0.57.1 h1:25KAAR9QR8KZrCZRThWMKVAwGoiHIrNbT72ULHTuI10=
+github.com/quic-go/quic-go v0.57.1/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s=
+github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
+github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -845,10 +845,8 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr
github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
@@ -890,11 +888,7 @@ github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5k
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.1.1 h1:T/YLemO5Yp7KPzS+lVtu+WsHn8yoSwTfItdAd1r3cck=
-github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
@@ -913,8 +907,9 @@ github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3k
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
@@ -932,8 +927,10 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
@@ -961,15 +958,13 @@ github.com/uber/jaeger-client-go v2.24.0+incompatible h1:CGchgJcHsDd2jWnaL4XngBy
github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
-github.com/vmihailenco/msgpack/v5 v5.3.4 h1:qMKAwOV+meBw2Y8k9cVwAy7qErtYCwBzZ2ellBfvnqc=
-github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
+github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
+github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/wealdtech/go-ens/v3 v3.5.1 h1:0VqkCjIGfIVdwHIf2QqYWWt3bbR1UE7RwBGx7YPpufQ=
@@ -979,6 +974,9 @@ github.com/wealdtech/go-multicodec v1.4.0/go.mod h1:aedGMaTeYkIqi/KCPre1ho5rTb3h
github.com/wealdtech/go-string2eth v1.1.0 h1:USJQmysUrBYYmZs7d45pMb90hRSyEwizP7lZaOZLDAw=
github.com/wealdtech/go-string2eth v1.1.0/go.mod h1:RUzsLjJtbZaJ/3UKn9kY19a/vCCUHtEWoUW3uiK6yGU=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
+github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
@@ -987,7 +985,13 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
+github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
+github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
+github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
+github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
gitlab.com/nolash/go-mockbytes v0.0.7 h1:9XVFpEfY67kGBVJve3uV19kzqORdlo7V+q09OE6Yo54=
gitlab.com/nolash/go-mockbytes v0.0.7/go.mod h1:KKOpNTT39j2Eo+P6uUTOncntfeKY6AFh/2CxuD5MpgE=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@@ -1002,27 +1006,25 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
-go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
-go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk=
-go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg=
-go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
+go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
+go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
-go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
+go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
+go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
+go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1047,9 +1049,12 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1063,8 +1068,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=
-golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -1088,8 +1093,10 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
-golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1136,14 +1143,18 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1166,9 +1177,10 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
-golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1203,7 +1215,6 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1225,37 +1236,41 @@ golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU=
+golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1264,15 +1279,19 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
-golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1325,9 +1344,10 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
-golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1430,8 +1450,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
-google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1443,8 +1463,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
-gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
@@ -1479,8 +1499,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
-lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
-lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
+lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
+lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
resenje.org/daemon v0.1.2/go.mod h1:mF5JRpH3EbrxI9WoeKY78e6PqSsbBtX9jAQL5vj/GBA=
resenje.org/email v0.1.3/go.mod h1:OhAVLRG3vqd9NSgayN3pAgzxTmc2B6mAefgShZvEgf0=
resenje.org/feed v0.1.2 h1:3OianQkoI4EalWx1SlzHtGjUMsoB4XTJQbeehWiyeFI=
@@ -1500,7 +1520,5 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
-rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/openapi/Swarm.yaml b/openapi/Swarm.yaml
index f3fa6189dc3..4168e63f80e 100644
--- a/openapi/Swarm.yaml
+++ b/openapi/Swarm.yaml
@@ -1,12 +1,12 @@
openapi: 3.0.3
info:
- version: 7.3.0
+ version: 7.4.1
title: Bee API
- description: "A list of the currently provided Interfaces to interact with the swarm, implementing file operations and sending messages"
+ description: "API endpoints for interacting with the Swarm network, supporting file operations, messaging, and node management"
externalDocs:
- description: Browse the documentation @ the Swarm Docs
+ description: Browse the documentation at the Swarm Docs
url: "https://docs.ethswarm.org"
servers:
@@ -30,7 +30,7 @@ servers:
paths:
"/grantee":
post:
- summary: "Create grantee list"
+ summary: "Create a grantee list"
tags:
- ACT
parameters:
@@ -79,7 +79,7 @@ paths:
"/grantee/{reference}":
get:
- summary: "Get grantee list"
+ summary: "Get the grantee list"
tags:
- ACT
parameters:
@@ -103,7 +103,7 @@ paths:
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
patch:
- summary: "Update grantee list"
+ summary: "Update the grantee list"
description: "Add or remove grantees from an existing grantee list"
tags:
- ACT
@@ -163,37 +163,12 @@ paths:
tags:
- Bytes
parameters:
- - in: header
- schema:
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmPostageBatchId"
- name: swarm-postage-batch-id
- required: true
- - in: header
- schema:
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmTagParameter"
- name: swarm-tag
- required: false
- - in: header
- schema:
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter"
- name: swarm-pin
- required: false
- - in: header
- schema:
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmDeferredUpload"
- name: swarm-deferred-upload
- required: false
- - in: header
- schema:
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmEncryptParameter"
- name: swarm-encrypt
- required: false
- - in: header
- schema:
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyLevelParameter"
- name: swarm-redundancy-level
- required: false
-
+ - $ref: "SwarmCommon.yaml#/components/parameters/SwarmPostageBatchId"
+ - $ref: "SwarmCommon.yaml#/components/parameters/SwarmTagParameter"
+ - $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter"
+ - $ref: "SwarmCommon.yaml#/components/parameters/SwarmDeferredUpload"
+ - $ref: "SwarmCommon.yaml#/components/parameters/SwarmEncryptParameter"
+ - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyLevelParameter"
requestBody:
content:
application/octet-stream:
@@ -221,7 +196,7 @@ paths:
"/bytes/{reference}":
get:
- summary: "Get referenced data"
+ summary: "Retrieve data by reference"
tags:
- Bytes
parameters:
@@ -252,7 +227,7 @@ paths:
default:
description: Default response
head:
- summary: Requests the headers containing the content type and length for the reference
+ summary: Retrieve headers containing the content type and length for the reference
tags:
- Bytes
parameters:
@@ -293,7 +268,7 @@ paths:
"/chunks":
post:
- summary: "Upload chunk"
+ summary: "Upload a chunk"
tags:
- Chunk
parameters:
@@ -307,7 +282,7 @@ paths:
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmAct"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress"
requestBody:
- description: Chunk binary data that has to have at least 8 bytes.
+ description: Chunk binary data containing at least 8 bytes.
content:
application/octet-stream:
schema:
@@ -318,7 +293,7 @@ paths:
description: OK
headers:
"swarm-tag":
- description: Tag UID if it was passed to the request `swarm-tag` header.
+ description: Tag UID from the request `swarm-tag` header if provided.
schema:
$ref: "SwarmCommon.yaml#/components/schemas/Uid"
"swarm-act-history-address":
@@ -338,8 +313,8 @@ paths:
"/chunks/stream":
get:
- summary: "Upload stream of chunks"
- description: "Returns a WebSocket connection on which stream of chunks can be uploaded. Each chunk sent is acknowledged using a binary response `0` which serves as confirmation of upload of single chunk. Chunks should be packaged as binary messages for uploading. If a tag is specified, the chunks will be streamed into local storage and then be uploaded to the network once the stream is closed. If a tag is not specified, the chunks will bypass local storage and be directly uploaded to the network through the stream as they arrive."
+ summary: "Stream chunks for upload"
+ description: "Establishes a WebSocket connection for streaming chunks. Each uploaded chunk receives a binary acknowledgment (`0`). Chunks are sent as binary messages. When a tag is specified, chunks are stored locally and uploaded to the network after the stream closes. Without a tag, chunks are directly uploaded to the network as they arrive."
tags:
- Chunk
parameters:
@@ -354,13 +329,9 @@ paths:
description: Default response
"/bzz":
post:
- summary: "Upload file or a collection of files"
+ summary: "Upload a file or collection of files"
description:
- "In order to upload a collection, user can send a multipart request with all the files populated in the form data with appropriate headers.\n\n
- User can also upload a tar file along with the swarm-collection header. This will upload the tar file after extracting the entire directory structure.\n\n
- If the swarm-collection header is absent, all requests (including tar files) are considered as single file uploads.\n\n
- A multipart request is treated as a collection regardless of whether the swarm-collection header is present. This means in order to serve single files
- uploaded as a multipart request, the swarm-index-document header should be used with the name of the file."
+ "Upload single files or collections of files. To upload a collection, send a multipart request with files in the form data with appropriate headers. Tar files can be uploaded with the `swarm-collection` header to extract and upload the directory structure. Without the `swarm-collection` header, requests are treated as single file uploads. Multipart requests are always treated as collections; use the `swarm-index-document` header to specify a single file to serve."
tags:
- BZZ
parameters:
@@ -425,7 +396,7 @@ paths:
"/bzz/{reference}":
get:
- summary: "Get file or index document from a collection of files"
+ summary: "Retrieve a file or index document from a collection"
tags:
- BZZ
parameters:
@@ -453,6 +424,11 @@ paths:
schema:
type: string
format: binary
+ headers:
+ "swarm-feed-resolved-version":
+ $ref: "SwarmCommon.yaml#/components/headers/SwarmFeedResolvedVersion"
+
+
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
"404":
@@ -462,7 +438,7 @@ paths:
default:
description: Default response
head:
- summary: Get the headers containing the content type and length for the reference
+ summary: Retrieve headers with content type and length for the reference
tags:
- BZZ
parameters:
@@ -487,7 +463,7 @@ paths:
"/bzz/{reference}/{path}":
get:
- summary: "Get referenced file from a collection of files"
+ summary: "Retrieve a file from a collection by path"
tags:
- BZZ
parameters:
@@ -514,6 +490,9 @@ paths:
schema:
type: string
format: binary
+ headers:
+ "swarm-feed-resolved-version":
+ $ref: "SwarmCommon.yaml#/components/headers/SwarmFeedResolvedVersion"
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
@@ -563,12 +542,6 @@ paths:
tags:
- Tag
description: Tags can be thought of as upload sessions which can be tracked using the tags endpoint. It will keep track of the chunks that are uploaded as part of the tag and will push them out to the network once a done split is called on the Tag. This happens internally if you use the `Swarm-Deferred-Upload` header.
- requestBody:
- required: true
- content:
- application/json:
- schema:
- $ref: "SwarmCommon.yaml#/components/schemas/NewTagRequest"
responses:
"201":
description: New Tag Info
@@ -672,12 +645,12 @@ paths:
required: true
description: Swarm reference of the root hash
post:
- summary: Pin the root hash with the given reference
+ summary: Pin a root hash by reference
tags:
- Pinning
responses:
"200":
- description: Pin already exists, so no operation
+ description: Pin already exists
content:
application/json:
schema:
@@ -697,12 +670,12 @@ paths:
default:
description: Default response
delete:
- summary: Unpin the root hash with the given reference
+ summary: Unpin a root hash by reference
tags:
- Pinning
responses:
"200":
- description: Unpinning root hash with reference
+ description: Root hash has been unpinned
content:
application/json:
schema:
@@ -714,12 +687,12 @@ paths:
default:
description: Default response
get:
- summary: Get pinning status of the root hash with the given reference
+ summary: Get the pinning status of a root hash
tags:
- Pinning
responses:
"200":
- description: Reference of the pinned root hash
+ description: The pinned root hash reference
content:
application/json:
schema:
@@ -752,7 +725,7 @@ paths:
"/pins/check":
get:
- summary: Validate pinned chunks integerity
+ summary: Validate pinned chunks integrity
tags:
- Pinning
parameters:
@@ -761,7 +734,7 @@ paths:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/SwarmOnlyReference"
required: false
- description: The number of items to skip before starting to collect the result set.
+ description: Optional reference to check; if not provided, all pinned references are checked
responses:
"200":
description: List of checked root hash references
@@ -776,7 +749,7 @@ paths:
"/pss/send/{topic}/{targets}":
post:
- summary: Send to recipient or target with Postal Service for Swarm
+ summary: Send a message using the Postal Service for Swarm
tags:
- Postal Service for Swarm
parameters:
@@ -813,7 +786,7 @@ paths:
"/pss/subscribe/{topic}":
get:
- summary: Subscribe for messages on the given topic.
+ summary: Subscribe to messages on a topic
tags:
- Postal Service for Swarm
parameters:
@@ -825,7 +798,7 @@ paths:
description: Topic name
responses:
"200":
- description: Returns a WebSocket with a subscription for incoming message data on the requested topic.
+ description: Establishes a WebSocket subscription for incoming messages on the topic
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
@@ -845,7 +818,7 @@ paths:
description: "Single Owner Chunk address (which may have multiple payloads)"
responses:
"200":
- description: Returns a WebSocket with a subscription for incoming message data on the requested SOC address.
+ description: Establishes a WebSocket subscription for incoming messages on the Single Owner Chunk address
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
@@ -853,7 +826,7 @@ paths:
"/soc/{owner}/{id}":
post:
- summary: Upload single owner chunk
+ summary: Upload a Single Owner Chunk
tags:
- Single owner chunk
parameters:
@@ -862,13 +835,13 @@ paths:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/EthereumAddress"
required: true
- description: Owner
+ description: Ethereum address of the chunk owner
- in: path
name: id
schema:
$ref: "SwarmCommon.yaml#/components/schemas/HexString"
required: true
- description: Id
+ description: Unique identifier for the chunk
- in: query
name: sig
schema:
@@ -885,7 +858,7 @@ paths:
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress"
requestBody:
required: true
- description: The SOC binary data is composed of the span (8 bytes) and the at most 4KB payload.
+ description: The SOC binary data, composed of the span (8 bytes) and up to 4KB of payload.
content:
application/octet-stream:
schema:
@@ -912,7 +885,7 @@ paths:
default:
description: Default response
get:
- summary: Resolve Single Owner Chunk data
+ summary: Retrieve Single Owner Chunk data
tags:
- Single owner chunk
parameters:
@@ -927,7 +900,7 @@ paths:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/HexString"
required: true
- description: Arbitrary identifier of the related data
+ description: Unique identifier for the chunk data
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmOnlyRootChunkParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmCache"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyStrategyParameter"
@@ -955,7 +928,7 @@ paths:
"/feeds/{owner}/{topic}":
post:
- summary: Create an initial feed root manifest
+ summary: Create a feed root manifest
tags:
- Feed
parameters:
@@ -964,13 +937,13 @@ paths:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/EthereumAddress"
required: true
- description: Owner
+ description: Ethereum address of the feed owner
- in: path
name: topic
schema:
$ref: "SwarmCommon.yaml#/components/schemas/HexString"
required: true
- description: Topic
+ description: Topic identifier for the feed
- in: query
name: type
schema:
@@ -1002,7 +975,7 @@ paths:
default:
description: Default response
get:
- summary: Find feed update
+      summary: Retrieve a feed update
tags:
- Feed
parameters:
@@ -1011,13 +984,13 @@ paths:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/EthereumAddress"
required: true
- description: Owner
+ description: Ethereum address of the feed owner
- in: path
name: topic
schema:
$ref: "SwarmCommon.yaml#/components/schemas/HexString"
required: true
- description: Topic
+ description: Topic identifier for the feed
- in: query
name: at
schema:
@@ -1051,6 +1024,9 @@ paths:
$ref: "SwarmCommon.yaml#/components/headers/SwarmFeedIndex"
"swarm-feed-index-next":
$ref: "SwarmCommon.yaml#/components/headers/SwarmFeedIndexNext"
+ "swarm-feed-resolved-version":
+ $ref: "SwarmCommon.yaml#/components/headers/SwarmFeedResolvedVersion"
+
content:
application/octet-stream:
schema:
@@ -1067,7 +1043,7 @@ paths:
"/stewardship/{reference}":
get:
- summary: "Check if content is available"
+ summary: "Check content availability"
tags:
- Stewardship
parameters:
@@ -1091,7 +1067,7 @@ paths:
default:
description: Default response
put:
- summary: "Re-upload content for specified root hash"
+ summary: "Re-upload content by reference"
tags:
- Stewardship
parameters:
@@ -1137,7 +1113,7 @@ paths:
"/health":
get:
- summary: Get node overall health Status
+ summary: Get the overall health status of the node
description: |
Health Status will indicate node healthiness.
@@ -1156,7 +1132,7 @@ paths:
"/readiness":
get:
- summary: Readiness endpoint indicates if node is ready to start accepting traffic
+ summary: Check if the node is ready to accept traffic
tags:
- Status
responses:
@@ -1171,7 +1147,7 @@ paths:
"/balances":
get:
- summary: Get the balances with all known peers including prepaid services
+ summary: Get balances with all known peers
tags:
- Balance
responses:
@@ -1188,7 +1164,7 @@ paths:
"/balances/{address}":
get:
- summary: Get the balances with a specific peer including prepaid services
+ summary: Get the balance with a specific peer
tags:
- Balance
parameters:
@@ -1231,7 +1207,7 @@ paths:
"/consumed":
get:
- summary: Get the past due consumption balances with all known peers
+ summary: Get past due consumption balances with all known peers
tags:
- Balance
responses:
@@ -1248,7 +1224,7 @@ paths:
"/consumed/{address}":
get:
- summary: Get the past due consumption balance with a specific peer
+ summary: Get past due consumption balance with a specific peer
tags:
- Balance
parameters:
@@ -1274,7 +1250,7 @@ paths:
"/chequebook/address":
get:
- summary: Get the address of the chequebook contract used
+ summary: Get the chequebook contract address
tags:
- Chequebook
responses:
@@ -1304,7 +1280,7 @@ paths:
"/chunks/{address}":
get:
- summary: "Get chunk"
+ summary: "Retrieve a chunk"
tags:
- Chunk
parameters:
@@ -1339,7 +1315,7 @@ paths:
default:
description: Default response
head:
- summary: Check if chunk at address exists locally
+ summary: Check if a chunk exists locally
tags:
- Chunk
parameters:
@@ -1364,7 +1340,7 @@ paths:
"/envelope/{address}":
post:
- summary: "Create postage stamp signature against given chunk address"
+ summary: "Create a postage stamp for a chunk"
tags:
- Envelope
parameters:
@@ -1391,7 +1367,7 @@ paths:
"/connect/{multiAddress}":
post:
- summary: Connect to address
+ summary: Connect to a peer address
tags:
- Connectivity
parameters:
@@ -1418,7 +1394,7 @@ paths:
"/reservestate":
get:
- summary: Get reserve state
+ summary: Get the reserve state
tags:
- Status
responses:
@@ -1433,7 +1409,7 @@ paths:
"/chainstate":
get:
- summary: Get chain state
+ summary: Get the chain state
tags:
- Status
responses:
@@ -1448,7 +1424,7 @@ paths:
"/node":
get:
- summary: Get information about the node
+ summary: Get node information
tags:
- Status
responses:
@@ -1463,7 +1439,7 @@ paths:
"/peers":
get:
- summary: Get a list of peers
+ summary: Get the list of connected peers
tags:
- Connectivity
responses:
@@ -1478,7 +1454,7 @@ paths:
"/peers/{address}":
delete:
- summary: Remove peer
+ summary: Disconnect from a peer
tags:
- Connectivity
parameters:
@@ -1490,7 +1466,7 @@ paths:
description: Swarm address of peer
responses:
"200":
- description: Disconnected peer
+ description: Peer has been disconnected
content:
application/json:
schema:
@@ -1504,7 +1480,7 @@ paths:
"/pingpong/{address}":
post:
- summary: Try connection to node
+ summary: Ping a peer to measure latency
tags:
- Connectivity
parameters:
@@ -1532,7 +1508,7 @@ paths:
"/settlements/{address}":
get:
- summary: Get amount of sent and received from settlements with a peer
+ summary: Get settlement amounts sent and received with a peer
tags:
- Settlements
parameters:
@@ -1544,7 +1520,7 @@ paths:
description: Swarm address of peer
responses:
"200":
- description: Amount of sent or received from settlements with a peer
+ description: Settlement amounts sent and received with the peer
content:
application/json:
schema:
@@ -1558,7 +1534,7 @@ paths:
"/settlements":
get:
- summary: Get settlements with all known peers and total amount sent or received
+ summary: Get settlements with all known peers and totals
tags:
- Settlements
responses:
@@ -1575,7 +1551,7 @@ paths:
"/timesettlements":
get:
- summary: Get time based settlements with all known peers and total amount sent or received
+ summary: Get time-based settlements with all known peers and totals
tags:
- Settlements
responses:
@@ -1592,7 +1568,7 @@ paths:
"/topology":
get:
- summary: Get topology of known network
+ summary: Get the network topology
tags:
- Connectivity
responses:
@@ -1605,7 +1581,7 @@ paths:
"/welcome-message":
get:
- summary: Get configured P2P welcome message
+ summary: Get the P2P welcome message
tags:
- Connectivity
responses:
@@ -1620,7 +1596,7 @@ paths:
default:
description: Default response
post:
- summary: Set P2P welcome message
+ summary: Set the P2P welcome message
tags:
- Connectivity
requestBody:
@@ -1644,7 +1620,7 @@ paths:
"/chequebook/cashout/{peer-id}":
get:
- summary: Get last cashout action for the peer
+ summary: Get the last cashout status for a peer
parameters:
- in: path
name: peer-id
@@ -1668,7 +1644,7 @@ paths:
default:
description: Default response
post:
- summary: Cashout the last cheque for the peer
+ summary: Cash out the last cheque for a peer
parameters:
- in: path
name: peer-id
@@ -1682,7 +1658,7 @@ paths:
- Chequebook
responses:
"201":
- description: OK
+ description: Cheque has been cashed out
content:
application/json:
schema:
@@ -1698,7 +1674,7 @@ paths:
"/chequebook/cheque/{peer-id}":
get:
- summary: Get last cheques for the peer
+ summary: Get the last cheques for a peer
parameters:
- in: path
name: peer-id
@@ -1710,7 +1686,7 @@ paths:
- Chequebook
responses:
"200":
- description: Last cheques
+ description: The last cheques for the peer
content:
application/json:
schema:
@@ -1724,12 +1700,12 @@ paths:
"/chequebook/cheque":
get:
- summary: Get last cheques for all peers
+ summary: Get the last cheques for all peers
tags:
- Chequebook
responses:
"200":
- description: Last cheques
+ description: The last cheques for all peers
content:
application/json:
schema:
@@ -1743,14 +1719,14 @@ paths:
"/chequebook/deposit":
post:
- summary: Deposit tokens from overlay address into chequebook
+ summary: Deposit tokens into the chequebook
parameters:
- in: query
name: amount
schema:
type: integer
required: true
- description: amount of tokens to deposit
+ description: Amount of tokens to deposit
- $ref: "SwarmCommon.yaml#/components/parameters/GasPriceParameter"
tags:
- Chequebook
@@ -1770,14 +1746,14 @@ paths:
"/chequebook/withdraw":
post:
- summary: Withdraw tokens from the chequebook to the overlay address
+ summary: Withdraw tokens from the chequebook
parameters:
- in: query
name: amount
schema:
type: integer
required: true
- description: amount of tokens to withdraw
+ description: Amount of tokens to withdraw
- $ref: "SwarmCommon.yaml#/components/parameters/GasPriceParameter"
tags:
- Chequebook
@@ -1814,7 +1790,7 @@ paths:
"/transactions/{txHash}":
get:
- summary: Get information about a sent transaction
+ summary: Retrieve transaction information
parameters:
- in: path
name: txHash
@@ -1826,7 +1802,7 @@ paths:
- Transaction
responses:
"200":
- description: Get info about transaction
+ description: Transaction information
content:
application/json:
schema:
@@ -1838,7 +1814,7 @@ paths:
default:
description: Default response
post:
- summary: Rebroadcast existing transaction
+ summary: Rebroadcast a transaction
parameters:
- in: path
name: txHash
@@ -1889,12 +1865,12 @@ paths:
"/stamps":
get:
- summary: Get stamps for this node
+ summary: Get postage stamps for this node
tags:
- Postage Stamps
responses:
"200":
- description: Returns an array of postage batches.
+ description: An array of postage stamps
content:
application/json:
schema:
@@ -1961,7 +1937,7 @@ paths:
post:
summary: Buy a new postage batch.
description: |
- Be aware, this endpoint creates an on-chain transactions and transfers BZZ from the node's Ethereum account and hence directly manipulates the wallet balance!
+ Be aware, this endpoint creates an on-chain transaction and transfers BZZ from the node's Ethereum account, directly affecting the wallet balance!
tags:
- Postage Stamps
parameters:
@@ -1976,7 +1952,7 @@ paths:
schema:
type: integer
required: true
- description: Batch depth which specifies how many chunks can be signed with the batch. It is a logarithm. Must be higher than default bucket depth (16)
+ description: Batch depth (logarithm) specifying the maximum number of chunks this stamp can cover. Must be greater than the default bucket depth (16)
- in: query
name: label
schema:
@@ -2010,7 +1986,7 @@ paths:
patch:
summary: Top up an existing postage batch.
description: |
- Be aware, this endpoint creates on-chain transactions and transfers BZZ from the node's Ethereum account and hence directly manipulates the wallet balance!
+ Be aware, this endpoint creates an on-chain transaction and transfers BZZ from the node's Ethereum account, directly affecting the wallet balance!
tags:
- Postage Stamps
parameters:
@@ -2050,7 +2026,7 @@ paths:
patch:
summary: Dilute an existing postage batch.
description: |
- Be aware, this endpoint creates on-chain transactions and transfers BZZ from the node's Ethereum account and hence directly manipulates the wallet balance!
+ Be aware, this endpoint creates an on-chain transaction and transfers BZZ from the node's Ethereum account, directly affecting the wallet balance!
tags:
- Postage Stamps
parameters:
@@ -2065,7 +2041,7 @@ paths:
schema:
type: integer
required: true
- description: New batch depth. Must be higher than the previous depth.
+ description: The new batch depth, which must be greater than the current depth
- $ref: "SwarmCommon.yaml#/components/parameters/GasPriceParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/GasLimitParameter"
responses:
@@ -2086,12 +2062,12 @@ paths:
"/batches":
get:
- summary: Get all globally available batches that were purchased by all nodes.
+ summary: Get all globally available postage batches
tags:
- Postage Stamps
responses:
"200":
- description: Returns an array of all available and currently valid postage batches.
+ description: An array of all available and valid postage batches
content:
application/json:
schema:
@@ -2140,7 +2116,7 @@ paths:
"/accounting":
get:
- summary: Get all accounting associated values with all known peers
+ summary: Get accounting values for all known peers
tags:
- Balance
responses:
@@ -2157,7 +2133,7 @@ paths:
"/redistributionstate":
get:
- summary: Get current status of node in redistribution game
+ summary: Get the node's redistribution game status
tags:
- RedistributionState
responses:
@@ -2175,7 +2151,7 @@ paths:
description: Default response
"/wallet":
get:
- summary: Get wallet balance for BZZ and xDai
+ summary: Get wallet balance for BZZ and xDAI
tags:
- Wallet
responses:
@@ -2191,7 +2167,7 @@ paths:
description: Default response
"/wallet/withdraw/{coin}":
post:
- summary: Allows withdrawals of BZZ or xDAI to provided (whitelisted) address
+ summary: Withdraw BZZ or xDAI to a whitelisted address
tags:
- Wallet
parameters:
@@ -2266,8 +2242,8 @@ paths:
"/stake/{amount}":
post:
- summary: Deposit some amount for staking.
- description: Be aware, this endpoint creates an on-chain transactions and transfers BZZ from the node's Ethereum account and hence directly manipulates the wallet balance.
+ summary: Deposit an amount for staking.
+ description: Be aware, this endpoint creates an on-chain transaction and transfers BZZ from the node's Ethereum account, directly affecting the wallet balance.
tags:
- Staking
parameters:
@@ -2311,8 +2287,8 @@ paths:
default:
description: Default response
delete:
- summary: Withdraws all past staked amount back to the wallet.
- description: Be aware, this endpoint can only be called when the contract is paused and is in the process of being migrated to a new contract.
+ summary: Withdraw all previously staked amounts.
+ description: Be aware, this endpoint can only be called when the contract is paused and undergoing migration to a new contract.
tags:
- Staking
parameters:
diff --git a/openapi/SwarmCommon.yaml b/openapi/SwarmCommon.yaml
index 9fa0bdb7528..ab39fa48555 100644
--- a/openapi/SwarmCommon.yaml
+++ b/openapi/SwarmCommon.yaml
@@ -2,8 +2,7 @@ openapi: 3.0.3
info:
version: 4.3.0
title: Common Data Types
- description: |
- \*****bzzz*****
+ description: Common data structures and types used throughout the Bee API
externalDocs:
description: Browse the documentation @ the Swarm Docs
@@ -37,7 +36,7 @@ components:
$ref: "#/components/schemas/PublicKey"
BigInt:
- description: Numeric string that represents integer which might exceed `Number.MAX_SAFE_INTEGER` limit (2^53-1)
+ description: Numeric string representing an integer that may exceed `Number.MAX_SAFE_INTEGER` (2^53-1)
type: string
example: "1000000000000000000"
@@ -50,6 +49,8 @@ components:
type: integer
commitment:
type: integer
+ reserveCapacityDoubling:
+ type: integer
ChainState:
type: object
@@ -240,12 +241,12 @@ components:
example: "2020-06-11T11:26:42.6969797+02:00"
Duration:
- description: Go time.Duration format
+ description: Time duration in Go time.Duration format (e.g., 5.0018ms)
type: string
example: "5.0018ms"
Seconds:
- description: Go time.Duration format in seconds
+ description: Time duration in seconds (Go time.Duration format)
type: number
example: 30.5
@@ -293,12 +294,6 @@ components:
MultiAddress:
type: string
- NewTagRequest:
- type: object
- properties:
- address:
- $ref: "#/components/schemas/SwarmAddress"
-
NewTagResponse:
type: object
properties:
@@ -511,7 +506,7 @@ components:
utilization:
type: integer
usable:
- description: Indicate that the batch was discovered by the Bee node, but it awaits enough on-chain confirmations before declaring the batch as usable.
+ description: Indicates whether the batch was discovered by the Bee node and has received sufficient on-chain confirmations
type: boolean
label:
type: string
@@ -838,7 +833,7 @@ components:
LoggerExp:
type: string
- description: Base 64 encoded regular expression or subsystem string.
+ description: Base64-encoded regular expression or subsystem string
pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$"
example: "b25lL25hbWU="
@@ -1075,6 +1070,13 @@ components:
schema:
type: string
+ SwarmFeedResolvedVersion:
+ schema:
+ type: string
+ required: false
+ description: "Indicates which feed version was resolved (v1 or v2)"
+
+
parameters:
GasPriceParameter:
in: header
@@ -1107,7 +1109,7 @@ components:
type: boolean
required: false
description: >
- Represents if the uploaded data should be also locally pinned on the node.
+ Indicates whether the uploaded data should also be locally pinned on this node
SwarmEncryptParameter:
in: header
@@ -1116,7 +1118,7 @@ components:
type: boolean
required: false
description: >
- Represents the encrypting state of the file
+ Indicates whether the file should be encrypted
SwarmRedundancyLevelParameter:
in: header
@@ -1137,14 +1139,11 @@ components:
enum: [0, 1, 2, 3]
required: false
description: >
- Specify the retrieve strategy on redundant data.
- The numbers stand for NONE, DATA, PROX and RACE, respectively.
- Strategy NONE means no prefetching takes place.
- Strategy DATA means only data chunks are prefetched.
- Strategy PROX means only chunks that are close to the node are prefetched.
- Strategy RACE means all chunks are prefetched: n data chunks and k parity chunks. The first n chunks to arrive are used to reconstruct the file.
- Multiple strategies can be used in a fallback cascade if the swarm redundancy fallback mode is set to true.
- The default strategy is NONE, DATA, falling back to PROX, falling back to RACE
+ Specify the retrieval strategy for redundant data.
+ Values represent: NONE (0), DATA (1), PROX (2), RACE (3).
+ NONE: no prefetching. DATA: prefetch only data chunks. PROX: prefetch chunks near this node. RACE: prefetch all chunks and use the first n to arrive.
+ Multiple strategies can be cascaded if fallback mode is enabled.
+ Default: NONE > DATA > PROX > RACE
SwarmRedundancyFallbackModeParameter:
in: header
@@ -1186,7 +1185,7 @@ components:
type: string
example: index.html
required: false
- description: Default file to be referenced on path, if exists under that path
+ description: Default file to serve when a directory path is accessed
SwarmErrorDocumentParameter:
in: header
@@ -1195,7 +1194,7 @@ components:
type: string
example: error.html
required: false
- description: Configure custom error document to be returned when a specified path can not be found in collection
+ description: Custom error document to return when a path is not found in the collection
SwarmCollection:
in: header
@@ -1235,7 +1234,7 @@ components:
default: "true"
required: false
description: >
- Determines if the uploaded data should be sent to the network immediately or in a deferred fashion. By default the upload will be deferred.
+ Indicates whether the uploaded data should be sent to the network immediately or deferred. Default: deferred (true)
SwarmCache:
in: header
@@ -1244,7 +1243,7 @@ components:
type: boolean
default: "true"
required: false
- description: "Determines if the download data should be cached on the node. By default the download will be cached"
+ description: "Indicates whether downloaded data should be cached on the node. Default: cached (true)"
SwarmAct:
in: header
@@ -1282,7 +1281,7 @@ components:
responses:
"200":
- description: OK.
+ description: Success
"204":
description: The resource was deleted successfully.
"400":
diff --git a/packaging/bee.yaml b/packaging/bee.yaml
index 8176dfb47df..f5354f8e4d4 100644
--- a/packaging/bee.yaml
+++ b/packaging/bee.yaml
@@ -38,6 +38,8 @@ data-dir: "/var/lib/bee"
# help: false
## triggers connect to main net bootnodes.
# mainnet: true
+## minimum gas tip cap in wei for transactions, 0 means use suggested gas tip cap
+# minimum-gas-tip-cap: 0
## minimum radius storage threshold
# minimum-storage-radius: "0"
## NAT exposed address
@@ -108,8 +110,6 @@ password-file: "/var/lib/bee/password"
# tracing-service-name: bee
## skips the gas estimate step for contract transactions
# transaction-debug-mode: false
-## bootstrap node using postage snapshot from the network
-# use-postage-snapshot: false
## log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace
# verbosity: info
## maximum node warmup duration; proceeds when stable or after this time
@@ -118,3 +118,15 @@ password-file: "/var/lib/bee/password"
# welcome-message: ""
## withdrawal target addresses
# withdrawal-addresses-whitelist: []
+## enable wss p2p connections (default: false)
+# p2p-wss-enable: false
+## wss address (default: :1635)
+# p2p-wss-addr: :1635
+## WSS NAT exposed address
+# nat-wss-addr: ""
+## autotls domain (default: libp2p.direct)
+# autotls-domain: ""
+## autotls registration endpoint (default: https://registration.libp2p.direct)
+# autotls-registration-endpoint: ""
+## autotls ca endpoint (default: https://acme-v02.api.letsencrypt.org/directory)
+# autotls-ca-endpoint: ""
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
index bce886018ae..db118ff1ef0 100644
--- a/packaging/docker/README.md
+++ b/packaging/docker/README.md
@@ -2,49 +2,60 @@
The docker-compose provides an app container for Bee.
To prepare your machine to run docker compose execute
-```
+
+```bash
mkdir -p bee && cd bee
wget -q https://raw.githubusercontent.com/ethersphere/bee/master/packaging/docker/docker-compose.yml
wget -q https://raw.githubusercontent.com/ethersphere/bee/master/packaging/docker/env -O .env
```
+
Set all configuration variables inside `.env`
If you want to run node in full mode, set `BEE_FULL_NODE=true`
Bee requires an Ethereum endpoint to function. Obtain a free Infura account and set:
+
- `BEE_BLOCKCHAIN_RPC_ENDPOINT=wss://sepolia.infura.io/ws/v3/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`
Set bee password by either setting `BEE_PASSWORD` or `BEE_PASSWORD_FILE`
If you want to use password file set it to
+
- `BEE_PASSWORD_FILE=/password`
Mount password file local file system by adding
-```
+
+```yaml
- ./password:/password
```
+
to bee volumes inside `docker-compose.yml`
Start it with
-```
+
+```bash
docker-compose up -d
```
From logs find URL line with `on sepolia you can get both sepolia eth and sepolia bzz from` and prefund your node
-```
+
+```bash
docker-compose logs -f bee-1
```
Update services with
-```
+
+```bash
docker-compose pull && docker-compose up -d
```
## Running multiple Bee nodes
+
It is easy to run multiple bee nodes with docker compose by adding more services to `docker-compose.yaml`
To do so, open `docker-compose.yaml`, copy lines 4-54 and past this after line 54 (whole bee-1 section).
In the copied lines, replace all occurrences of `bee-1` with `bee-2` and adjust the `API_ADDR` and `P2P_ADDR` to respectively `1733`, `1734.`
Lastly, add your newly configured services under `volumes` (last lines), such that it looks like:
+
```yaml
volumes:
bee-1:
diff --git a/packaging/homebrew-amd64/bee.yaml b/packaging/homebrew-amd64/bee.yaml
index c771e5d72c2..811ffe62d60 100644
--- a/packaging/homebrew-amd64/bee.yaml
+++ b/packaging/homebrew-amd64/bee.yaml
@@ -38,6 +38,8 @@ data-dir: "/usr/local/var/lib/swarm-bee"
# help: false
## triggers connect to main net bootnodes.
# mainnet: true
+## minimum gas tip cap in wei for transactions, 0 means use suggested gas tip cap
+# minimum-gas-tip-cap: 0
## minimum radius storage threshold
# minimum-storage-radius: "0"
## NAT exposed address
@@ -108,8 +110,6 @@ password-file: "/usr/local/var/lib/swarm-bee/password"
# tracing-service-name: bee
## skips the gas estimate step for contract transactions
# transaction-debug-mode: false
-## bootstrap node using postage snapshot from the network
-# use-postage-snapshot: false
## log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace
# verbosity: info
## maximum node warmup duration; proceeds when stable or after this time
@@ -118,3 +118,15 @@ password-file: "/usr/local/var/lib/swarm-bee/password"
# welcome-message: ""
## withdrawal target addresses
# withdrawal-addresses-whitelist: []
+## enable wss p2p connections (default: false)
+# p2p-wss-enable: false
+## wss address (default: :1635)
+# p2p-wss-addr: :1635
+## WSS NAT exposed address
+# nat-wss-addr: ""
+## autotls domain (default: libp2p.direct)
+# autotls-domain: ""
+## autotls registration endpoint (default: https://registration.libp2p.direct)
+# autotls-registration-endpoint: ""
+## autotls ca endpoint (default: https://acme-v02.api.letsencrypt.org/directory)
+# autotls-ca-endpoint: ""
diff --git a/packaging/homebrew-arm64/bee.yaml b/packaging/homebrew-arm64/bee.yaml
index 057d6c54d32..382405cb149 100644
--- a/packaging/homebrew-arm64/bee.yaml
+++ b/packaging/homebrew-arm64/bee.yaml
@@ -38,6 +38,8 @@ data-dir: "/opt/homebrew/var/lib/swarm-bee"
# help: false
## triggers connect to main net bootnodes.
# mainnet: true
+## minimum gas tip cap in wei for transactions, 0 means use suggested gas tip cap
+# minimum-gas-tip-cap: 0
## minimum radius storage threshold
# minimum-storage-radius: "0"
## NAT exposed address
@@ -108,8 +110,6 @@ password-file: "/opt/homebrew/var/lib/swarm-bee/password"
# tracing-service-name: bee
## skips the gas estimate step for contract transactions
# transaction-debug-mode: false
-## bootstrap node using postage snapshot from the network
-# use-postage-snapshot: false
## log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace
# verbosity: info
## maximum node warmup duration; proceeds when stable or after this time
@@ -118,3 +118,15 @@ password-file: "/opt/homebrew/var/lib/swarm-bee/password"
# welcome-message: ""
## withdrawal target addresses
# withdrawal-addresses-whitelist: []
+## enable wss p2p connections (default: false)
+# p2p-wss-enable: false
+## wss address (default: :1635)
+# p2p-wss-addr: :1635
+## WSS NAT exposed address
+# nat-wss-addr: ""
+## autotls domain (default: libp2p.direct)
+# autotls-domain: ""
+## autotls registration endpoint (default: https://registration.libp2p.direct)
+# autotls-registration-endpoint: ""
+## autotls ca endpoint (default: https://acme-v02.api.letsencrypt.org/directory)
+# autotls-ca-endpoint: ""
diff --git a/packaging/scoop/bee.yaml b/packaging/scoop/bee.yaml
index d2f97fedd33..58b48f70b52 100644
--- a/packaging/scoop/bee.yaml
+++ b/packaging/scoop/bee.yaml
@@ -38,6 +38,8 @@ data-dir: "./data"
# help: false
## triggers connect to main net bootnodes.
# mainnet: true
+## minimum gas tip cap in wei for transactions, 0 means use suggested gas tip cap
+# minimum-gas-tip-cap: 0
## minimum radius storage threshold
# minimum-storage-radius: "0"
## NAT exposed address
@@ -108,8 +110,6 @@ password-file: "./password"
# tracing-service-name: bee
## skips the gas estimate step for contract transactions
# transaction-debug-mode: false
-## bootstrap node using postage snapshot from the network
-# use-postage-snapshot: false
## log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace
# verbosity: info
## maximum node warmup duration; proceeds when stable or after this time
@@ -118,3 +118,15 @@ password-file: "./password"
# welcome-message: ""
## withdrawal target addresses
# withdrawal-addresses-whitelist: []
+## enable wss p2p connections (default: false)
+# p2p-wss-enable: false
+## wss address (default: :1635)
+# p2p-wss-addr: :1635
+## WSS NAT exposed address
+# nat-wss-addr: ""
+## autotls domain (default: libp2p.direct)
+# autotls-domain: ""
+## autotls registration endpoint (default: https://registration.libp2p.direct)
+# autotls-registration-endpoint: ""
+## autotls ca endpoint (default: https://acme-v02.api.letsencrypt.org/directory)
+# autotls-ca-endpoint: ""
diff --git a/pkg/accesscontrol/access.go b/pkg/accesscontrol/access.go
index 0b7f9a094ac..71ddd3500c5 100644
--- a/pkg/accesscontrol/access.go
+++ b/pkg/accesscontrol/access.go
@@ -129,7 +129,7 @@ func (al *ActLogic) getAccessKey(ctx context.Context, storage kvs.KeyValueStore,
// Generate lookup key and access key decryption key for a given public key.
func (al *ActLogic) getKeys(publicKey *ecdsa.PublicKey) ([]byte, []byte, error) {
nonces := [][]byte{zeroByteArray, oneByteArray}
- keys, err := al.Session.Key(publicKey, nonces)
+ keys, err := al.Key(publicKey, nonces)
if len(keys) != len(nonces) {
return nil, nil, err
}
diff --git a/pkg/accesscontrol/access_test.go b/pkg/accesscontrol/access_test.go
index 0b2eeaa1e56..082a995ad03 100644
--- a/pkg/accesscontrol/access_test.go
+++ b/pkg/accesscontrol/access_test.go
@@ -71,7 +71,6 @@ func getPrivKey(keyNumber int) *ecdsa.PrivateKey {
}
func TestDecryptRef_Publisher(t *testing.T) {
- t.Parallel()
ctx := context.Background()
id1 := getPrivKey(1)
s := kvsmock.New()
diff --git a/pkg/accesscontrol/controller.go b/pkg/accesscontrol/controller.go
index 804c307ce8c..cc3c8ae6fd7 100644
--- a/pkg/accesscontrol/controller.go
+++ b/pkg/accesscontrol/controller.go
@@ -261,7 +261,7 @@ func (c *ControllerStruct) getGranteeList(ctx context.Context, ls file.LoadSaver
}
func (c *ControllerStruct) encryptRefForPublisher(publisherPubKey *ecdsa.PublicKey, ref swarm.Address) (swarm.Address, error) {
- keys, err := c.access.Session.Key(publisherPubKey, [][]byte{oneByteArray})
+ keys, err := c.access.Key(publisherPubKey, [][]byte{oneByteArray})
if err != nil {
return swarm.ZeroAddress, err
}
@@ -275,7 +275,7 @@ func (c *ControllerStruct) encryptRefForPublisher(publisherPubKey *ecdsa.PublicK
}
func (c *ControllerStruct) decryptRefForPublisher(publisherPubKey *ecdsa.PublicKey, encryptedRef swarm.Address) (swarm.Address, error) {
- keys, err := c.access.Session.Key(publisherPubKey, [][]byte{oneByteArray})
+ keys, err := c.access.Key(publisherPubKey, [][]byte{oneByteArray})
if err != nil {
return swarm.ZeroAddress, err
}
diff --git a/pkg/accesscontrol/controller_test.go b/pkg/accesscontrol/controller_test.go
index 915d82d7d8b..c282ef825fb 100644
--- a/pkg/accesscontrol/controller_test.go
+++ b/pkg/accesscontrol/controller_test.go
@@ -9,6 +9,7 @@ import (
"crypto/ecdsa"
"reflect"
"testing"
+ "testing/synctest"
"time"
"github.com/ethersphere/bee/v2/pkg/accesscontrol"
@@ -178,7 +179,7 @@ func TestController_UpdateHandler(t *testing.T) {
publisher := getPrivKey(1)
diffieHellman := accesscontrol.NewDefaultSession(publisher)
al := accesscontrol.NewLogic(diffieHellman)
- keys, err := al.Session.Key(&publisher.PublicKey, [][]byte{{1}})
+ keys, err := al.Key(&publisher.PublicKey, [][]byte{{1}})
assertNoError(t, "Session key", err)
refCipher := encryption.New(keys[0], 0, 0, sha3.NewLegacyKeccak256)
ls := createLs()
@@ -235,44 +236,46 @@ func TestController_UpdateHandler(t *testing.T) {
assert.Len(t, gl.Get(), 2)
})
t.Run("add and revoke then get from history", func(t *testing.T) {
- addRevokeList := []*ecdsa.PublicKey{&grantee.PublicKey}
- ref := swarm.RandAddress(t)
- _, hRef, encRef, err := c.UploadHandler(ctx, ls, ref, &publisher.PublicKey, swarm.ZeroAddress)
- require.NoError(t, err)
-
- // Need to wait a second before each update call so that a new history mantaray fork is created for the new key(timestamp) entry
- time.Sleep(1 * time.Second)
- beforeRevokeTS := time.Now().Unix()
- _, egranteeRef, hrefUpdate1, _, err := c.UpdateHandler(ctx, ls, gls, swarm.ZeroAddress, hRef, &publisher.PublicKey, addRevokeList, nil)
- require.NoError(t, err)
-
- time.Sleep(1 * time.Second)
- granteeRef, _, hrefUpdate2, _, err := c.UpdateHandler(ctx, ls, gls, egranteeRef, hrefUpdate1, &publisher.PublicKey, nil, addRevokeList)
- require.NoError(t, err)
-
- gl, err := accesscontrol.NewGranteeListReference(ctx, ls, granteeRef)
- require.NoError(t, err)
- assert.Empty(t, gl.Get())
- // expect history reference to be different after grantee list update
- assert.NotEqual(t, hrefUpdate1, hrefUpdate2)
-
- granteeDH := accesscontrol.NewDefaultSession(grantee)
- granteeAl := accesscontrol.NewLogic(granteeDH)
- granteeCtrl := accesscontrol.NewController(granteeAl)
- // download with grantee shall still work with the timestamp before the revoke
- decRef, err := granteeCtrl.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, hrefUpdate2, beforeRevokeTS)
- require.NoError(t, err)
- assert.Equal(t, ref, decRef)
-
- // download with grantee shall NOT work with the latest timestamp
- decRef, err = granteeCtrl.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, hrefUpdate2, time.Now().Unix())
- require.Error(t, err)
- assert.Equal(t, swarm.ZeroAddress, decRef)
-
- // publisher shall still be able to download with the timestamp before the revoke
- decRef, err = c.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, hrefUpdate2, beforeRevokeTS)
- require.NoError(t, err)
- assert.Equal(t, ref, decRef)
+ synctest.Test(t, func(t *testing.T) {
+ addRevokeList := []*ecdsa.PublicKey{&grantee.PublicKey}
+ ref := swarm.RandAddress(t)
+ _, hRef, encRef, err := c.UploadHandler(ctx, ls, ref, &publisher.PublicKey, swarm.ZeroAddress)
+ require.NoError(t, err)
+
+ // Need to wait a second before each update call so that a new history mantaray fork is created for the new key(timestamp) entry
+ time.Sleep(1 * time.Second)
+ beforeRevokeTS := time.Now().Unix()
+ _, egranteeRef, hrefUpdate1, _, err := c.UpdateHandler(ctx, ls, gls, swarm.ZeroAddress, hRef, &publisher.PublicKey, addRevokeList, nil)
+ require.NoError(t, err)
+
+ time.Sleep(1 * time.Second)
+ granteeRef, _, hrefUpdate2, _, err := c.UpdateHandler(ctx, ls, gls, egranteeRef, hrefUpdate1, &publisher.PublicKey, nil, addRevokeList)
+ require.NoError(t, err)
+
+ gl, err := accesscontrol.NewGranteeListReference(ctx, ls, granteeRef)
+ require.NoError(t, err)
+ assert.Empty(t, gl.Get())
+ // expect history reference to be different after grantee list update
+ assert.NotEqual(t, hrefUpdate1, hrefUpdate2)
+
+ granteeDH := accesscontrol.NewDefaultSession(grantee)
+ granteeAl := accesscontrol.NewLogic(granteeDH)
+ granteeCtrl := accesscontrol.NewController(granteeAl)
+ // download with grantee shall still work with the timestamp before the revoke
+ decRef, err := granteeCtrl.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, hrefUpdate2, beforeRevokeTS)
+ require.NoError(t, err)
+ assert.Equal(t, ref, decRef)
+
+ // download with grantee shall NOT work with the latest timestamp
+ decRef, err = granteeCtrl.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, hrefUpdate2, time.Now().Unix())
+ require.Error(t, err)
+ assert.Equal(t, swarm.ZeroAddress, decRef)
+
+ // publisher shall still be able to download with the timestamp before the revoke
+ decRef, err = c.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, hrefUpdate2, beforeRevokeTS)
+ require.NoError(t, err)
+ assert.Equal(t, ref, decRef)
+ })
})
t.Run("add twice", func(t *testing.T) {
addList := []*ecdsa.PublicKey{&grantee.PublicKey, &grantee.PublicKey}
diff --git a/pkg/accesscontrol/session.go b/pkg/accesscontrol/session.go
index a571337c17d..f3996264970 100644
--- a/pkg/accesscontrol/session.go
+++ b/pkg/accesscontrol/session.go
@@ -37,7 +37,7 @@ func (s *SessionStruct) Key(publicKey *ecdsa.PublicKey, nonces [][]byte) ([][]by
if publicKey == nil {
return nil, ErrInvalidPublicKey
}
- x, y := publicKey.Curve.ScalarMult(publicKey.X, publicKey.Y, s.key.D.Bytes())
+ x, y := publicKey.ScalarMult(publicKey.X, publicKey.Y, s.key.D.Bytes())
if x == nil || y == nil {
return nil, ErrSecretKeyInfinity
}
diff --git a/pkg/accounting/accounting.go b/pkg/accounting/accounting.go
index 35316e3a73b..a877a70d0ec 100644
--- a/pkg/accounting/accounting.go
+++ b/pkg/accounting/accounting.go
@@ -10,6 +10,7 @@ import (
"context"
"errors"
"fmt"
+ "maps"
"math/big"
"strings"
"sync"
@@ -311,10 +312,7 @@ func (a *Accounting) PrepareCredit(ctx context.Context, peer swarm.Address, pric
}
}
- timeElapsedInSeconds := (a.timeNow().UnixMilli() - accountingPeer.refreshTimestampMilliseconds) / 1000
- if timeElapsedInSeconds > 1 {
- timeElapsedInSeconds = 1
- }
+ timeElapsedInSeconds := min((a.timeNow().UnixMilli()-accountingPeer.refreshTimestampMilliseconds)/1000, 1)
refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), a.refreshRate)
overdraftLimit := new(big.Int).Add(accountingPeer.paymentThreshold, refreshDue)
@@ -721,9 +719,7 @@ func (a *Accounting) PeerAccounting() (map[string]PeerInfo, error) {
a.accountingPeersMu.Lock()
accountingPeersList := make(map[string]*accountingPeer)
- for peer, accountingPeer := range a.accountingPeers {
- accountingPeersList[peer] = accountingPeer
- }
+ maps.Copy(accountingPeersList, a.accountingPeers)
a.accountingPeersMu.Unlock()
for peer, accountingPeer := range accountingPeersList {
@@ -746,10 +742,7 @@ func (a *Accounting) PeerAccounting() (map[string]PeerInfo, error) {
t := a.timeNow()
- timeElapsedInSeconds := t.Unix() - accountingPeer.refreshReceivedTimestamp
- if timeElapsedInSeconds > 1 {
- timeElapsedInSeconds = 1
- }
+ timeElapsedInSeconds := min(t.Unix()-accountingPeer.refreshReceivedTimestamp, 1)
// get appropriate refresh rate
refreshRate := new(big.Int).Set(a.refreshRate)
@@ -760,10 +753,7 @@ func (a *Accounting) PeerAccounting() (map[string]PeerInfo, error) {
refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), refreshRate)
currentThresholdGiven := new(big.Int).Add(accountingPeer.disconnectLimit, refreshDue)
- timeElapsedInSeconds = (t.UnixMilli() - accountingPeer.refreshTimestampMilliseconds) / 1000
- if timeElapsedInSeconds > 1 {
- timeElapsedInSeconds = 1
- }
+ timeElapsedInSeconds = min((t.UnixMilli()-accountingPeer.refreshTimestampMilliseconds)/1000, 1)
// get appropriate refresh rate
refreshDue = new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), a.refreshRate)
@@ -1353,10 +1343,7 @@ func (d *debitAction) Apply() error {
a.metrics.TotalDebitedAmount.Add(tot)
a.metrics.DebitEventsCount.Inc()
- timeElapsedInSeconds := a.timeNow().Unix() - d.accountingPeer.refreshReceivedTimestamp
- if timeElapsedInSeconds > 1 {
- timeElapsedInSeconds = 1
- }
+ timeElapsedInSeconds := min(a.timeNow().Unix()-d.accountingPeer.refreshReceivedTimestamp, 1)
// get appropriate refresh rate
refreshRate := new(big.Int).Set(a.refreshRate)
diff --git a/pkg/accounting/accounting_test.go b/pkg/accounting/accounting_test.go
index bfcd0fcda05..ec72235534e 100644
--- a/pkg/accounting/accounting_test.go
+++ b/pkg/accounting/accounting_test.go
@@ -1503,7 +1503,7 @@ func TestAccountingCallPaymentErrorRetries(t *testing.T) {
acc.NotifyPaymentSent(peer1Addr, sentAmount, errors.New("error"))
// try another n requests 1 per second
- for i := 0; i < 10; i++ {
+ for range 10 {
ts++
acc.SetTime(ts)
@@ -1857,8 +1857,8 @@ func testAccountingSettlementGrowingThresholds(t *testing.T, settleFunc func(t *
checkPaymentThreshold := new(big.Int).Set(testPayThreshold)
// Simulate first 18 threshold upgrades
- for j := 0; j < 18; j++ {
- for i := 0; i < 100; i++ {
+ for range 18 {
+ for range 100 {
// expect no change in threshold while less than 100 seconds worth of refreshment rate was settled
settleFunc(t, acc, peer1Addr, testGrowth-1)
@@ -1891,7 +1891,7 @@ func testAccountingSettlementGrowingThresholds(t *testing.T, settleFunc func(t *
// Expect no increase for the next 179 seconds of refreshment
- for k := 0; k < 1799; k++ {
+ for range 1799 {
settleFunc(t, acc, peer1Addr, testGrowth)
@@ -1917,7 +1917,7 @@ func testAccountingSettlementGrowingThresholds(t *testing.T, settleFunc func(t *
// Expect no increase for another 3599 seconds of refreshments
- for k := 0; k < 3599; k++ {
+ for range 3599 {
settleFunc(t, acc, peer1Addr, testGrowth)
diff --git a/pkg/accounting/mock/accounting.go b/pkg/accounting/mock/accounting.go
index e5e0d98130e..cbf838f9ba3 100644
--- a/pkg/accounting/mock/accounting.go
+++ b/pkg/accounting/mock/accounting.go
@@ -44,14 +44,14 @@ type creditAction struct {
applied bool
}
-// WithDebitFunc sets the mock Debit function
+// WithPrepareDebitFunc sets the mock PrepareDebit function
func WithPrepareDebitFunc(f func(peer swarm.Address, price uint64) (accounting.Action, error)) Option {
return optionFunc(func(s *Service) {
s.prepareDebitFunc = f
})
}
-// WithDebitFunc sets the mock Debit function
+// WithPrepareCreditFunc sets the mock PrepareCredit function
func WithPrepareCreditFunc(f func(peer swarm.Address, price uint64, originated bool) (accounting.Action, error)) Option {
return optionFunc(func(s *Service) {
s.prepareCreditFunc = f
diff --git a/pkg/addressbook/addressbook_test.go b/pkg/addressbook/addressbook_test.go
index 1b5c22490b5..1307ad92b67 100644
--- a/pkg/addressbook/addressbook_test.go
+++ b/pkg/addressbook/addressbook_test.go
@@ -47,7 +47,7 @@ func run(t *testing.T, f bookFunc) {
t.Fatal(err)
}
- bzzAddr, err := bzz.NewAddress(crypto.NewDefaultSigner(pk), multiaddr, addr1, 1, trxHash)
+ bzzAddr, err := bzz.NewAddress(crypto.NewDefaultSigner(pk), []ma.Multiaddr{multiaddr}, addr1, 1, trxHash)
if err != nil {
t.Fatal(err)
}
diff --git a/pkg/api/api.go b/pkg/api/api.go
index e3419ccdb38..2b713bc76d4 100644
--- a/pkg/api/api.go
+++ b/pkg/api/api.go
@@ -43,6 +43,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/pss"
"github.com/ethersphere/bee/v2/pkg/resolver"
"github.com/ethersphere/bee/v2/pkg/resolver/client/ens"
+ "github.com/ethersphere/bee/v2/pkg/resolver/multiresolver"
"github.com/ethersphere/bee/v2/pkg/sctx"
"github.com/ethersphere/bee/v2/pkg/settlement"
"github.com/ethersphere/bee/v2/pkg/settlement/swap"
@@ -78,6 +79,7 @@ const (
SwarmSocSignatureHeader = "Swarm-Soc-Signature"
SwarmFeedIndexHeader = "Swarm-Feed-Index"
SwarmFeedIndexNextHeader = "Swarm-Feed-Index-Next"
+ SwarmFeedResolvedVersionHeader = "Swarm-Feed-Resolved-Version"
SwarmOnlyRootChunk = "Swarm-Only-Root-Chunk"
SwarmCollectionHeader = "Swarm-Collection"
SwarmPostageBatchIdHeader = "Swarm-Postage-Batch-Id"
@@ -109,9 +111,8 @@ const (
)
const (
- multiPartFormData = "multipart/form-data"
- contentTypeTar = "application/x-tar"
- boolHeaderSetValue = "true"
+ multiPartFormData = "multipart/form-data"
+ contentTypeTar = "application/x-tar"
)
var (
@@ -214,8 +215,9 @@ type Service struct {
whitelistedWithdrawalAddress []common.Address
- preMapHooks map[string]func(v string) (string, error)
- validate *validator.Validate
+ preMapHooks map[string]func(v string) (string, error)
+ customValidationMessages map[string]func(err validator.FieldError) error
+ validate *validator.Validate
redistributionAgent *storageincentives.Agent
@@ -321,6 +323,7 @@ func New(
}
return name
})
+ s.setupValidation()
s.stamperStore = stamperStore
for _, v := range whitelistedWithdrawalAddress {
@@ -449,6 +452,10 @@ func (s *Service) resolveNameOrAddress(str string) (swarm.Address, error) {
return addr, nil
}
+ if errors.Is(err, multiresolver.ErrResolverService) || errors.Is(err, resolver.ErrServiceNotAvailable) {
+ return swarm.ZeroAddress, err
+ }
+
return swarm.ZeroAddress, fmt.Errorf("%w: %w", errInvalidNameOrAddress, err)
}
@@ -620,7 +627,7 @@ func (s *Service) checkOrigin(r *http.Request) bool {
// validationError is a custom error type for validation errors.
type validationError struct {
Entry string
- Value interface{}
+ Value any
Cause error
}
@@ -632,7 +639,7 @@ func (e *validationError) Error() string {
// mapStructure maps the input into output struct and validates the output.
// It's a helper method for the handlers, which reduces the chattiness
// of the code.
-func (s *Service) mapStructure(input, output interface{}) func(string, log.Logger, http.ResponseWriter) {
+func (s *Service) mapStructure(input, output any) func(string, log.Logger, http.ResponseWriter) {
// response unifies the response format for parsing and validation errors.
response := func(err error) func(string, log.Logger, http.ResponseWriter) {
return func(msg string, logger log.Logger, w http.ResponseWriter) {
@@ -651,13 +658,23 @@ func (s *Service) mapStructure(input, output interface{}) func(string, log.Logge
Message: msg,
Code: http.StatusBadRequest,
}
+ hasServiceUnavailable := false
for _, err := range merr.Errors {
+ if errors.Is(err, resolver.ErrServiceNotAvailable) {
+ hasServiceUnavailable = true
+ resp.Reasons = append(resp.Reasons, jsonhttp.Reason{
+ Field: "address",
+ Error: err.Error(),
+ })
+ continue
+ }
var perr *parseError
if errors.As(err, &perr) {
resp.Reasons = append(resp.Reasons, jsonhttp.Reason{
Field: perr.Entry,
Error: perr.Cause.Error(),
})
+ continue
}
var verr *validationError
if errors.As(err, &verr) {
@@ -667,7 +684,14 @@ func (s *Service) mapStructure(input, output interface{}) func(string, log.Logge
})
}
}
- jsonhttp.BadRequest(w, resp)
+
+ if hasServiceUnavailable {
+ resp.Message = "service unavailable"
+ resp.Code = http.StatusServiceUnavailable
+ jsonhttp.ServiceUnavailable(w, resp)
+ } else {
+ jsonhttp.BadRequest(w, resp)
+ }
}
}
@@ -688,11 +712,17 @@ func (s *Service) mapStructure(input, output interface{}) func(string, log.Logge
case []byte:
val = string(v)
}
+ var cause error
+ if msgFn, ok := s.customValidationMessages[err.Tag()]; ok {
+ cause = msgFn(err)
+ } else {
+ cause = fmt.Errorf("want %s:%s", err.Tag(), err.Param())
+ }
vErrs = multierror.Append(vErrs,
&validationError{
Entry: strings.ToLower(err.Field()),
Value: val,
- Cause: fmt.Errorf("want %s:%s", err.Tag(), err.Param()),
+ Cause: cause,
})
}
return response(vErrs.ErrorOrNil())
diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go
index 986bc663d3e..4dd7aa1d3f7 100644
--- a/pkg/api/api_test.go
+++ b/pkg/api/api_test.go
@@ -266,7 +266,7 @@ func newTestServer(t *testing.T, o testServerOptions) (*http.Client, *websocket.
transport = transport.Clone()
// always dial to the server address, regardless of the url host and port
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
- return net.Dial(network, ts.Listener.Addr().String())
+ return (&net.Dialer{}).DialContext(ctx, network, ts.Listener.Addr().String())
}
return transport.RoundTrip(r)
}),
diff --git a/pkg/api/bytes.go b/pkg/api/bytes.go
index 4c1cd891df0..945e9c3beae 100644
--- a/pkg/api/bytes.go
+++ b/pkg/api/bytes.go
@@ -40,7 +40,7 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
Pin bool `map:"Swarm-Pin"`
Deferred *bool `map:"Swarm-Deferred-Upload"`
Encrypt bool `map:"Swarm-Encrypt"`
- RLevel redundancy.Level `map:"Swarm-Redundancy-Level"`
+ RLevel redundancy.Level `map:"Swarm-Redundancy-Level" validate:"rLevel"`
Act bool `map:"Swarm-Act"`
HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"`
}{}
diff --git a/pkg/api/bytes_test.go b/pkg/api/bytes_test.go
index 40280e44992..56ec6574565 100644
--- a/pkg/api/bytes_test.go
+++ b/pkg/api/bytes_test.go
@@ -8,11 +8,13 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
"net/http"
"strconv"
"testing"
"github.com/ethersphere/bee/v2/pkg/api"
+ "github.com/ethersphere/bee/v2/pkg/file/redundancy"
"github.com/ethersphere/bee/v2/pkg/jsonhttp"
"github.com/ethersphere/bee/v2/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/v2/pkg/log"
@@ -408,3 +410,68 @@ func TestBytesDirectUpload(t *testing.T) {
}),
)
}
+
+func TestBytesRedundancyLevel(t *testing.T) {
+ t.Parallel()
+
+ client, _, _, _ := newTestServer(t, testServerOptions{
+ Storer: mockstorer.New(),
+ Post: mockpost.New(mockpost.WithAcceptAll()),
+ })
+
+ const maxValidLevel = redundancy.PARANOID
+
+ tests := []struct {
+ name string
+ level int
+ want *jsonhttp.StatusResponse
+ }{
+ {"minimum level (NONE) is valid", int(redundancy.NONE), nil},
+ {"maximum valid level (PARANOID) is valid", int(maxValidLevel), nil},
+ {
+ "level below minimum is invalid", int(-1),
+ &jsonhttp.StatusResponse{
+ Code: http.StatusBadRequest,
+ Message: "invalid header params",
+ Reasons: []jsonhttp.Reason{
+ {
+ Field: "Swarm-Redundancy-Level",
+ Error: "invalid syntax",
+ },
+ },
+ },
+ },
+ {
+ "level above maximum is invalid", int(maxValidLevel + 1),
+ &jsonhttp.StatusResponse{
+ Code: http.StatusBadRequest,
+ Message: "invalid header params",
+ Reasons: []jsonhttp.Reason{
+ {
+ Field: "swarm-redundancy-level",
+ Error: fmt.Sprintf("want redundancy level to be between %d and %d", int(redundancy.NONE), int(redundancy.PARANOID)),
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ opts := []jsonhttptest.Option{
+ jsonhttptest.WithRequestHeader(api.SwarmDeferredUploadHeader, "true"),
+ jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
+ jsonhttptest.WithRequestHeader(api.SwarmRedundancyLevelHeader, strconv.Itoa(tt.level)),
+ jsonhttptest.WithRequestBody(bytes.NewReader([]byte("test"))),
+ }
+ var statusCode int
+ if tt.want == nil {
+ statusCode = http.StatusCreated
+ } else {
+ statusCode = tt.want.Code
+ opts = append(opts, jsonhttptest.WithExpectedJSONResponse(*tt.want))
+ }
+ jsonhttptest.Request(t, client, http.MethodPost, "/bytes", statusCode, opts...)
+ })
+ }
+}
diff --git a/pkg/api/bzz.go b/pkg/api/bzz.go
index e692091aa0d..d99594bc682 100644
--- a/pkg/api/bzz.go
+++ b/pkg/api/bzz.go
@@ -72,7 +72,7 @@ func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
Deferred *bool `map:"Swarm-Deferred-Upload"`
Encrypt bool `map:"Swarm-Encrypt"`
IsDir bool `map:"Swarm-Collection"`
- RLevel redundancy.Level `map:"Swarm-Redundancy-Level"`
+ RLevel redundancy.Level `map:"Swarm-Redundancy-Level" validate:"rLevel"`
Act bool `map:"Swarm-Act"`
HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"`
}{}
@@ -364,6 +364,115 @@ func (s *Service) bzzHeadHandler(w http.ResponseWriter, r *http.Request) {
s.serveReference(logger, address, paths.Path, w, r, true)
}
+type getWrappedResult struct {
+ ch swarm.Chunk
+ v1 bool // indicates whether the feed that was resolved is v1. false if v2
+ err error
+}
+
+// resolveFeed races the resolution of both types of feeds.
+// figure out if it's a v1 or v2 chunk.
+// it returns the first correct feed found, the type found ("v1" or "v2") or an error.
+func (s *Service) resolveFeed(ctx context.Context, getter storage.Getter, ch swarm.Chunk) (swarm.Chunk, string, error) {
+ innerCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ getWrapped := func(v1 bool) chan getWrappedResult {
+ ret := make(chan getWrappedResult)
+ go func() {
+ wc, err := feeds.GetWrappedChunk(innerCtx, getter, ch, v1)
+ if err != nil {
+ select {
+ case ret <- getWrappedResult{nil, v1, err}:
+ return
+ case <-innerCtx.Done():
+ return
+ }
+ }
+
+ // here we just check whether the address is retrievable.
+ // if it returns an error we send that over the channel, otherwise
+ // we send the wc chunk back to the caller so that the feed can be
+ // dereferenced.
+ _, err = getter.Get(innerCtx, wc.Address())
+ if err != nil {
+ select {
+ case ret <- getWrappedResult{wc, v1, err}:
+ return
+ case <-innerCtx.Done():
+ return
+ }
+ }
+ select {
+ case ret <- getWrappedResult{wc, v1, nil}:
+ return
+ case <-innerCtx.Done():
+ return
+ }
+ }()
+ return ret
+ }
+ isV1, err := feeds.IsV1Payload(ch)
+ if err != nil {
+ return nil, "", err
+ }
+ // if we have v1 length, it means there's ambiguity so we
+ // should fetch both feed versions. if the length isn't v1
+ // then we should only try to fetch v2.
+ var (
+ v1C, v2C chan getWrappedResult
+ both = false
+ )
+ if isV1 {
+ both = true
+ v1C = getWrapped(true)
+ v2C = getWrapped(false)
+ } else {
+ v2C = getWrapped(false)
+ }
+
+ // closure to handle processing one channel then the other.
+ // the "resolving" parameter is meant to tell the closure which feed type is in the result struct
+ // which in turn allows it to return which feed type was resolved.
+ processChanOutput := func(resolving string, result getWrappedResult, other chan getWrappedResult) (swarm.Chunk, string, error) {
+ defer cancel()
+ if !both {
+ if resolving == "v2" {
+ return result.ch, resolving, nil
+ }
+ return result.ch, resolving, result.err
+ }
+ // both are being checked. if there's no err return the chunk
+ // otherwise wait for the other channel
+ if result.err == nil {
+ return result.ch, resolving, nil
+ }
+ if resolving == "v1" {
+ resolving = "v2"
+ } else {
+ resolving = "v1"
+ }
+ // wait for the other one
+ select {
+ case result := <-other:
+ if !result.v1 {
+ // resolving v2
+ return result.ch, resolving, nil
+ }
+ return result.ch, resolving, result.err
+ case <-innerCtx.Done():
+ return nil, "", ctx.Err()
+ }
+ }
+ select {
+ case v1r := <-v1C:
+ return processChanOutput("v1", v1r, v2C)
+ case v2r := <-v2C:
+ return processChanOutput("v2", v2r, v1C)
+ case <-innerCtx.Done():
+ return nil, "", ctx.Err()
+ }
+}
+
func (s *Service) serveReference(logger log.Logger, address swarm.Address, pathVar string, w http.ResponseWriter, r *http.Request, headerOnly bool) {
loggerV1 := logger.V(1).Build()
@@ -371,7 +480,7 @@ func (s *Service) serveReference(logger log.Logger, address swarm.Address, pathV
Cache *bool `map:"Swarm-Cache"`
Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"`
FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"`
- RLevel *redundancy.Level `map:"Swarm-Redundancy-Level"`
+ RLevel *redundancy.Level `map:"Swarm-Redundancy-Level" validate:"omitempty,rLevel"`
ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"`
}{}
@@ -399,7 +508,6 @@ func (s *Service) serveReference(logger log.Logger, address swarm.Address, pathV
jsonhttp.BadRequest(w, "could not parse headers")
return
}
-
FETCH:
// read manifest entry
m, err := manifest.NewDefaultManifestReference(
@@ -433,17 +541,30 @@ FETCH:
jsonhttp.NotFound(w, "no update found")
return
}
- wc, err := feeds.GetWrappedChunk(ctx, s.storer.Download(cache), ch)
+
+ wc, feedVer, err := s.resolveFeed(ctx, s.storer.Download(cache), ch)
if err != nil {
+ if errors.Is(err, feeds.ErrNotLegacyPayload) {
+ logger.Debug("bzz: download: feed is not a legacy payload")
+ logger.Error(err, "bzz download: feed is not a legacy payload")
+ jsonhttp.BadRequest(w, "bzz download: feed is not a legacy payload")
+ return
+ }
+ if errors.As(err, &feeds.WrappedChunkNotFoundError{}) {
+ logger.Debug("bzz download: feed pointing to the wrapped chunk not found", "error", err)
+ logger.Error(err, "bzz download: feed pointing to the wrapped chunk not found")
+ jsonhttp.NotFound(w, "bzz download: feed pointing to the wrapped chunk not found")
+ return
+ }
logger.Debug("bzz download: mapStructure feed update failed", "error", err)
logger.Error(nil, "bzz download: mapStructure feed update failed")
jsonhttp.InternalServerError(w, "mapStructure feed update")
return
}
+
address = wc.Address()
// modify ls and init with non-existing wrapped chunk
ls = loadsave.NewReadonlyWithRootCh(s.storer.Download(cache), s.storer.Cache(), wc, rLevel)
-
feedDereferenced = true
curBytes, err := cur.MarshalBinary()
if err != nil {
@@ -454,6 +575,7 @@ FETCH:
}
w.Header().Set(SwarmFeedIndexHeader, hex.EncodeToString(curBytes))
+ w.Header().Set(SwarmFeedResolvedVersionHeader, feedVer)
// this header might be overriding others. handle with care. in the future
// we should implement an append functionality for this specific header,
// since different parts of handlers might be overriding others' values
@@ -462,7 +584,6 @@ FETCH:
goto FETCH
}
}
-
if pathVar == "" {
loggerV1.Debug("bzz download: handle empty path", "address", address)
@@ -477,6 +598,7 @@ FETCH:
return
}
}
+
logger.Debug("bzz download: address not found or incorrect", "address", address, "path", pathVar)
logger.Error(nil, "address not found or incorrect")
jsonhttp.NotFound(w, "address not found or incorrect")
@@ -571,7 +693,7 @@ func (s *Service) serveManifestEntry(
func (s *Service) downloadHandler(logger log.Logger, w http.ResponseWriter, r *http.Request, reference swarm.Address, additionalHeaders http.Header, etag, headersOnly bool, rootCh swarm.Chunk) {
headers := struct {
Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"`
- RLevel *redundancy.Level `map:"Swarm-Redundancy-Level"`
+ RLevel *redundancy.Level `map:"Swarm-Redundancy-Level" validate:"omitempty,rLevel"`
FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"`
ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"`
LookaheadBufferSize *int `map:"Swarm-Lookahead-Buffer-Size"`
diff --git a/pkg/api/bzz_test.go b/pkg/api/bzz_test.go
index d69eb79df95..3f444e6cb07 100644
--- a/pkg/api/bzz_test.go
+++ b/pkg/api/bzz_test.go
@@ -13,6 +13,7 @@ import (
"mime"
"mime/multipart"
"net/http"
+ "net/url"
"strconv"
"strings"
"testing"
@@ -26,6 +27,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/manifest"
mockbatchstore "github.com/ethersphere/bee/v2/pkg/postage/batchstore/mock"
mockpost "github.com/ethersphere/bee/v2/pkg/postage/mock"
+ testingsoc "github.com/ethersphere/bee/v2/pkg/soc/testing"
"github.com/ethersphere/bee/v2/pkg/storage/inmemchunkstore"
mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock"
"github.com/ethersphere/bee/v2/pkg/swarm"
@@ -102,10 +104,7 @@ func TestBzzUploadDownloadWithRedundancy_FLAKY(t *testing.T) {
store.Record()
defer store.Unrecord()
// we intend to forget as many chunks as possible for the given redundancy level
- forget := parityCnt
- if parityCnt > shardCnt {
- forget = shardCnt
- }
+ forget := min(parityCnt, shardCnt)
if levels == 1 {
forget = 2
}
@@ -139,7 +138,7 @@ func TestBzzUploadDownloadWithRedundancy_FLAKY(t *testing.T) {
if len(got) != len(want) {
t.Fatalf("got %v parts, want %v parts", len(got), len(want))
}
- for i := 0; i < len(want); i++ {
+ for i := range want {
if !bytes.Equal(got[i], want[i]) {
t.Errorf("part %v: got %q, want %q", i, string(got[i]), string(want[i]))
}
@@ -668,7 +667,7 @@ func TestBzzFilesRangeRequests(t *testing.T) {
if len(got) != len(want) {
t.Fatalf("got %v parts, want %v parts", len(got), len(want))
}
- for i := 0; i < len(want); i++ {
+ for i := range want {
if !bytes.Equal(got[i], want[i]) {
t.Errorf("part %v: got %q, want %q", i, string(got[i]), string(want[i]))
}
@@ -679,7 +678,7 @@ func TestBzzFilesRangeRequests(t *testing.T) {
}
}
-func createRangeHeader(data interface{}, ranges [][2]int) (header string, parts [][]byte) {
+func createRangeHeader(data any, ranges [][2]int) (header string, parts [][]byte) {
getLen := func() int {
switch data := data.(type) {
case []byte:
@@ -759,11 +758,20 @@ func TestFeedIndirection(t *testing.T) {
updateData = []byte("
Swarm Feeds Hello World!
")
logger = log.Noop
storer = mockstorer.New()
+ ctx = context.Background()
client, _, _, _ = newTestServer(t, testServerOptions{
Storer: storer,
Logger: logger,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
+ bzzDownloadResource = func(addr, path string) string {
+ values := url.Values{}
+ baseURL := "/bzz/" + addr + "/" + path
+ if len(values) > 0 {
+ return baseURL + "?" + values.Encode()
+ }
+ return baseURL
+ }
)
// tar all the test case files
tarReader := tarFiles(t, []f{
@@ -794,29 +802,6 @@ func TestFeedIndirection(t *testing.T) {
t.Fatalf("expected file reference, did not got any")
}
- // now use the "content" to mock the feed lookup
- // also, use the mocked mantaray chunks that unmarshal
- // into a real manifest with the mocked feed values when
- // called from the bzz endpoint. then call the bzz endpoint with
- // the pregenerated feed root manifest hash
-
- feedUpdate := toChunk(t, 121212, resp.Reference.Bytes())
-
- var (
- look = newMockLookup(-1, 0, feedUpdate, nil, &id{}, nil)
- factory = newMockFactory(look)
- bzzDownloadResource = func(addr, path string) string { return "/bzz/" + addr + "/" + path }
- ctx = context.Background()
- )
- client, _, _, _ = newTestServer(t, testServerOptions{
- Storer: storer,
- Logger: logger,
- Feeds: factory,
- })
- err := storer.Cache().Put(ctx, feedUpdate)
- if err != nil {
- t.Fatal(err)
- }
m, err := manifest.NewDefaultManifest(
loadsave.New(storer.ChunkStore(), storer.Cache(), pipelineFactory(storer.Cache(), false, 0), redundancy.DefaultLevel),
false,
@@ -838,14 +823,63 @@ func TestFeedIndirection(t *testing.T) {
t.Fatal(err)
}
- jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(manifRef.String(), ""), http.StatusOK,
- jsonhttptest.WithExpectedResponse(updateData),
- jsonhttptest.WithExpectedContentLength(len(updateData)),
- jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.SwarmFeedIndexHeader),
- jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.ContentDispositionHeader),
- jsonhttptest.WithExpectedResponseHeader(api.ContentDispositionHeader, `inline; filename="index.html"`),
- jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "text/html; charset=utf-8"),
- )
+ // now use the "content" root chunk to mock the feed lookup
+ // also, use the mocked mantaray chunks that unmarshal
+ // into a real manifest with the mocked feed values when
+ // called from the bzz endpoint. then call the bzz endpoint with
+ // the pregenerated feed root manifest hash
+
+ t.Run("legacy feed", func(t *testing.T) {
+ feedUpdate := toChunk(t, 121212, resp.Reference.Bytes())
+ var (
+ look = newMockLookup(-1, 0, feedUpdate, nil, &id{}, nil)
+ factory = newMockFactory(look)
+ )
+ client, _, _, _ = newTestServer(t, testServerOptions{
+ Storer: storer,
+ Logger: logger,
+ Feeds: factory,
+ })
+
+ jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(manifRef.String(), ""), http.StatusOK,
+ jsonhttptest.WithExpectedResponse(updateData),
+ jsonhttptest.WithExpectedContentLength(len(updateData)),
+ jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.SwarmFeedIndexHeader),
+ jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.ContentDispositionHeader),
+ jsonhttptest.WithExpectedResponseHeader(api.ContentDispositionHeader, `inline; filename="index.html"`),
+ jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "text/html; charset=utf-8"),
+ jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedResolvedVersionHeader, "v1"),
+ )
+ })
+
+ t.Run("wrapped feed", func(t *testing.T) {
+ // get root chunk of data and wrap it in a feed
+ rootCh, err := storer.ChunkStore().Get(ctx, resp.Reference)
+ if err != nil {
+ t.Fatal(err)
+ }
+ socRootCh := testingsoc.GenerateMockSOC(t, rootCh.Data()[swarm.SpanSize:]).Chunk()
+
+ var (
+ look = newMockLookup(-1, 0, socRootCh, nil, &id{}, nil)
+ factory = newMockFactory(look)
+ )
+ client, _, _, _ = newTestServer(t, testServerOptions{
+ Storer: storer,
+ Logger: logger,
+ Feeds: factory,
+ })
+
+ jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(manifRef.String(), ""), http.StatusOK,
+ jsonhttptest.WithExpectedResponse(updateData),
+ jsonhttptest.WithExpectedContentLength(len(updateData)),
+ jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.SwarmFeedIndexHeader),
+ jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.ContentDispositionHeader),
+ jsonhttptest.WithExpectedResponseHeader(api.ContentDispositionHeader, `inline; filename="index.html"`),
+ jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "text/html; charset=utf-8"),
+ jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedResolvedVersionHeader, "v2"),
+ )
+ })
}
func Test_bzzDownloadHandler_invalidInputs(t *testing.T) {
@@ -1144,3 +1178,144 @@ func TestBzzDownloadHeaders(t *testing.T) {
jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "text/html; charset=utf-8"),
)
}
+
+func TestBzzUploadRedundancyLevel(t *testing.T) {
+ t.Parallel()
+
+ client, _, _, _ := newTestServer(t, testServerOptions{
+ Storer: mockstorer.New(),
+ Post: mockpost.New(mockpost.WithAcceptAll()),
+ })
+
+ const maxValidLevel = redundancy.PARANOID
+
+ tests := []struct {
+ name string
+ level int
+ want *jsonhttp.StatusResponse
+ }{
+ {"minimum level (NONE) is valid", int(redundancy.NONE), nil},
+ {"maximum valid level (PARANOID) is valid", int(maxValidLevel), nil},
+ {
+ "level below minimum is invalid", int(-1),
+ &jsonhttp.StatusResponse{
+ Code: http.StatusBadRequest,
+ Message: "invalid header params",
+ Reasons: []jsonhttp.Reason{
+ {
+ Field: "Swarm-Redundancy-Level",
+ Error: "invalid syntax",
+ },
+ },
+ },
+ },
+ {
+ "level above maximum is invalid", int(maxValidLevel + 1),
+ &jsonhttp.StatusResponse{
+ Code: http.StatusBadRequest,
+ Message: "invalid header params",
+ Reasons: []jsonhttp.Reason{
+ {
+ Field: "swarm-redundancy-level",
+ Error: fmt.Sprintf("want redundancy level to be between %d and %d", int(redundancy.NONE), int(redundancy.PARANOID)),
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ opts := []jsonhttptest.Option{
+ jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/plain"),
+ jsonhttptest.WithRequestHeader(api.SwarmDeferredUploadHeader, "true"),
+ jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
+ jsonhttptest.WithRequestHeader(api.SwarmRedundancyLevelHeader, strconv.Itoa(tt.level)),
+ jsonhttptest.WithRequestBody(bytes.NewReader([]byte("test"))),
+ }
+ var statusCode int
+ if tt.want == nil {
+ statusCode = http.StatusCreated
+ } else {
+ statusCode = tt.want.Code
+ opts = append(opts, jsonhttptest.WithExpectedJSONResponse(*tt.want))
+ }
+ jsonhttptest.Request(t, client, http.MethodPost, "/bzz", statusCode, opts...)
+ })
+ }
+}
+
+func TestBzzDownloadRedundancyLevel(t *testing.T) {
+ t.Parallel()
+
+ client, _, _, _ := newTestServer(t, testServerOptions{
+ Storer: mockstorer.New(),
+ Post: mockpost.New(mockpost.WithAcceptAll()),
+ })
+
+ testData := []byte("test download redundancy level")
+ var resp api.BzzUploadResponse
+ jsonhttptest.Request(t, client, http.MethodPost, "/bzz", http.StatusCreated,
+ jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/plain"),
+ jsonhttptest.WithRequestHeader(api.SwarmDeferredUploadHeader, "true"),
+ jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
+ jsonhttptest.WithRequestBody(bytes.NewReader(testData)),
+ jsonhttptest.WithUnmarshalJSONResponse(&resp),
+ )
+
+ const maxValidLevel = redundancy.PARANOID
+
+ tests := []struct {
+ name string
+ level int
+ want *jsonhttp.StatusResponse
+ }{
+ {"minimum level (NONE) is valid", int(redundancy.NONE), nil},
+ {"maximum valid level (PARANOID) is valid", int(maxValidLevel), nil},
+ {
+ "level below minimum is invalid", int(-1),
+ &jsonhttp.StatusResponse{
+ Code: http.StatusBadRequest,
+ Message: "invalid header params",
+ Reasons: []jsonhttp.Reason{
+ {
+ Field: "Swarm-Redundancy-Level",
+ Error: "invalid syntax",
+ },
+ },
+ },
+ },
+ {
+ "level above maximum is invalid", int(maxValidLevel + 1),
+ &jsonhttp.StatusResponse{
+ Code: http.StatusBadRequest,
+ Message: "invalid header params",
+ Reasons: []jsonhttp.Reason{
+ {
+ Field: "swarm-redundancy-level",
+ Error: fmt.Sprintf("want redundancy level to be between %d and %d", int(redundancy.NONE), int(redundancy.PARANOID)),
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ opts := []jsonhttptest.Option{
+ jsonhttptest.WithRequestHeader(api.SwarmRedundancyLevelHeader, strconv.Itoa(tt.level)),
+ }
+ var statusCode int
+ if tt.want == nil {
+ statusCode = http.StatusOK
+ opts = append(opts,
+ jsonhttptest.WithExpectedResponse(testData),
+ )
+ } else {
+ statusCode = tt.want.Code
+ opts = append(opts, jsonhttptest.WithExpectedJSONResponse(*tt.want))
+ }
+ jsonhttptest.Request(t, client, http.MethodGet, "/bzz/"+resp.Reference.String(), statusCode, opts...)
+ })
+ }
+}
diff --git a/pkg/api/chequebook.go b/pkg/api/chequebook.go
index 33cd75d9767..cb82e72edfd 100644
--- a/pkg/api/chequebook.go
+++ b/pkg/api/chequebook.go
@@ -22,7 +22,6 @@ import (
const (
errChequebookBalance = "cannot get chequebook balance"
- errChequebookNoAmount = "did not specify amount"
errChequebookNoWithdraw = "cannot withdraw"
errChequebookNoDeposit = "cannot deposit"
errChequebookInsufficientFunds = "insufficient funds"
@@ -118,9 +117,9 @@ func (s *Service) chequebookLastPeerHandler(w http.ResponseWriter, r *http.Reque
}
if err == nil {
lastSentResponse = &chequebookLastChequePeerResponse{
- Beneficiary: lastSent.Cheque.Beneficiary.String(),
- Chequebook: lastSent.Cheque.Chequebook.String(),
- Payout: bigint.Wrap(lastSent.Cheque.CumulativePayout),
+ Beneficiary: lastSent.Beneficiary.String(),
+ Chequebook: lastSent.Chequebook.String(),
+ Payout: bigint.Wrap(lastSent.CumulativePayout),
}
}
@@ -134,9 +133,9 @@ func (s *Service) chequebookLastPeerHandler(w http.ResponseWriter, r *http.Reque
}
if err == nil {
lastReceivedResponse = &chequebookLastChequePeerResponse{
- Beneficiary: lastReceived.Cheque.Beneficiary.String(),
- Chequebook: lastReceived.Cheque.Chequebook.String(),
- Payout: bigint.Wrap(lastReceived.Cheque.CumulativePayout),
+ Beneficiary: lastReceived.Beneficiary.String(),
+ Chequebook: lastReceived.Chequebook.String(),
+ Payout: bigint.Wrap(lastReceived.CumulativePayout),
}
}
@@ -179,9 +178,9 @@ func (s *Service) chequebookAllLastHandler(w http.ResponseWriter, _ *http.Reques
lcr[i] = chequebookLastChequesPeerResponse{
Peer: i,
LastSent: &chequebookLastChequePeerResponse{
- Beneficiary: j.Cheque.Beneficiary.String(),
- Chequebook: j.Cheque.Chequebook.String(),
- Payout: bigint.Wrap(j.Cheque.CumulativePayout),
+ Beneficiary: j.Beneficiary.String(),
+ Chequebook: j.Chequebook.String(),
+ Payout: bigint.Wrap(j.CumulativePayout),
},
LastReceived: nil,
}
@@ -190,9 +189,9 @@ func (s *Service) chequebookAllLastHandler(w http.ResponseWriter, _ *http.Reques
if _, ok := lcr[i]; ok {
t := lcr[i]
t.LastReceived = &chequebookLastChequePeerResponse{
- Beneficiary: j.Cheque.Beneficiary.String(),
- Chequebook: j.Cheque.Chequebook.String(),
- Payout: bigint.Wrap(j.Cheque.CumulativePayout),
+ Beneficiary: j.Beneficiary.String(),
+ Chequebook: j.Chequebook.String(),
+ Payout: bigint.Wrap(j.CumulativePayout),
}
lcr[i] = t
} else {
@@ -200,9 +199,9 @@ func (s *Service) chequebookAllLastHandler(w http.ResponseWriter, _ *http.Reques
Peer: i,
LastSent: nil,
LastReceived: &chequebookLastChequePeerResponse{
- Beneficiary: j.Cheque.Beneficiary.String(),
- Chequebook: j.Cheque.Chequebook.String(),
- Payout: bigint.Wrap(j.Cheque.CumulativePayout),
+ Beneficiary: j.Beneficiary.String(),
+ Chequebook: j.Chequebook.String(),
+ Payout: bigint.Wrap(j.CumulativePayout),
},
}
}
diff --git a/pkg/api/chunk_stream_test.go b/pkg/api/chunk_stream_test.go
index 47c8e860b51..5ba62cdd76c 100644
--- a/pkg/api/chunk_stream_test.go
+++ b/pkg/api/chunk_stream_test.go
@@ -38,7 +38,7 @@ func TestChunkUploadStream(t *testing.T) {
t.Run("upload and verify", func(t *testing.T) {
chsToGet := []swarm.Chunk{}
- for i := 0; i < 5; i++ {
+ for range 5 {
ch := testingc.GenerateTestRandomChunk()
err := wsConn.SetWriteDeadline(time.Now().Add(time.Second))
diff --git a/pkg/api/chunk_test.go b/pkg/api/chunk_test.go
index 35ad491e606..aa4e4ec485f 100644
--- a/pkg/api/chunk_test.go
+++ b/pkg/api/chunk_test.go
@@ -248,7 +248,7 @@ func TestChunkInvalidParams(t *testing.T) {
})
}
-// TestDirectChunkUpload tests that the direct upload endpoint give correct error message in dev mode
+// TestChunkDirectUpload tests that the direct upload endpoint gives the correct error message in dev mode
func TestChunkDirectUpload(t *testing.T) {
t.Parallel()
var (
diff --git a/pkg/api/export_test.go b/pkg/api/export_test.go
index 1e5e8f1554f..00494f928da 100644
--- a/pkg/api/export_test.go
+++ b/pkg/api/export_test.go
@@ -133,7 +133,7 @@ var ErrHexLength = errHexLength
type HexInvalidByteError = hexInvalidByteError
-func MapStructure(input, output interface{}, hooks map[string]func(v string) (string, error)) error {
+func MapStructure(input, output any, hooks map[string]func(v string) (string, error)) error {
return mapStructure(input, output, hooks)
}
diff --git a/pkg/api/feed.go b/pkg/api/feed.go
index a1345e2b39d..cc73fc6f3df 100644
--- a/pkg/api/feed.go
+++ b/pkg/api/feed.go
@@ -71,7 +71,6 @@ func (s *Service) feedGetHandler(w http.ResponseWriter, r *http.Request) {
response("invalid header params", logger, w)
return
}
-
f := feeds.New(paths.Topic, paths.Owner)
lookup, err := s.feedFactory.NewLookup(feeds.Sequence, f)
if err != nil {
@@ -85,7 +84,6 @@ func (s *Service) feedGetHandler(w http.ResponseWriter, r *http.Request) {
}
return
}
-
ch, cur, next, err := lookup.At(r.Context(), queries.At, queries.After)
if err != nil {
logger.Debug("lookup at failed", "at", queries.At, "error", err)
@@ -102,7 +100,7 @@ func (s *Service) feedGetHandler(w http.ResponseWriter, r *http.Request) {
return
}
- wc, err := feeds.GetWrappedChunk(r.Context(), s.storer.Download(false), ch)
+ wc, feedVer, err := s.resolveFeed(r.Context(), s.storer.Download(false), ch)
if err != nil {
logger.Error(nil, "wrapped chunk cannot be retrieved")
jsonhttp.NotFound(w, "wrapped chunk cannot be retrieved")
@@ -134,11 +132,12 @@ func (s *Service) feedGetHandler(w http.ResponseWriter, r *http.Request) {
sig := socCh.Signature()
additionalHeaders := http.Header{
- ContentTypeHeader: {"application/octet-stream"},
- SwarmFeedIndexHeader: {hex.EncodeToString(curBytes)},
- SwarmFeedIndexNextHeader: {hex.EncodeToString(nextBytes)},
- SwarmSocSignatureHeader: {hex.EncodeToString(sig)},
- AccessControlExposeHeaders: {SwarmFeedIndexHeader, SwarmFeedIndexNextHeader, SwarmSocSignatureHeader},
+ ContentTypeHeader: {"application/octet-stream"},
+ SwarmFeedIndexHeader: {hex.EncodeToString(curBytes)},
+ SwarmFeedIndexNextHeader: {hex.EncodeToString(nextBytes)},
+ SwarmSocSignatureHeader: {hex.EncodeToString(sig)},
+ SwarmFeedResolvedVersionHeader: {feedVer},
+ AccessControlExposeHeaders: {SwarmFeedIndexHeader, SwarmFeedIndexNextHeader, SwarmSocSignatureHeader},
}
if headers.OnlyRootChunk {
diff --git a/pkg/api/feed_test.go b/pkg/api/feed_test.go
index a30c7d086da..e33eba63d0c 100644
--- a/pkg/api/feed_test.go
+++ b/pkg/api/feed_test.go
@@ -14,6 +14,7 @@ import (
"io"
"math/big"
"net/http"
+ "net/url"
"testing"
"github.com/ethersphere/bee/v2/pkg/api"
@@ -43,10 +44,16 @@ func TestFeed_Get(t *testing.T) {
var (
feedResource = func(owner, topic, at string) string {
+ values := url.Values{}
if at != "" {
- return fmt.Sprintf("/feeds/%s/%s?at=%s", owner, topic, at)
+ values.Set("at", at)
}
- return fmt.Sprintf("/feeds/%s/%s", owner, topic)
+
+ baseURL := fmt.Sprintf("/feeds/%s/%s", owner, topic)
+ if len(values) > 0 {
+ return baseURL + "?" + values.Encode()
+ }
+ return baseURL
}
mockStorer = mockstorer.New()
)
@@ -83,10 +90,11 @@ func TestFeed_Get(t *testing.T) {
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.SwarmSocSignatureHeader),
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.ContentDispositionHeader),
jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/octet-stream"),
+ jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedResolvedVersionHeader, "v1"),
)
})
- t.Run("latest", func(t *testing.T) {
+ t.Run("latest with legacy payload", func(t *testing.T) {
t.Parallel()
var (
@@ -111,6 +119,7 @@ func TestFeed_Get(t *testing.T) {
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.SwarmSocSignatureHeader),
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.ContentDispositionHeader),
jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/octet-stream"),
+ jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedResolvedVersionHeader, "v1"),
)
})
@@ -140,11 +149,18 @@ func TestFeed_Get(t *testing.T) {
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.SwarmSocSignatureHeader),
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.ContentDispositionHeader),
jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/octet-stream"),
+ jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedResolvedVersionHeader, "v2"),
)
})
t.Run("legacy payload with non existing wrapped chunk", func(t *testing.T) {
- t.Parallel()
+ t.Skip()
+ /*
+ This test has been disabled since it cannot be supported with the automatic
+ Feed resolution logic that is now in place. In case automatic feed resolution
+ would be removed at some point, this test can be reactivated. The issue is
+ thoroughly described in the PR: https://github.com/ethersphere/bee/pull/5287
+ */
wrappedRef := make([]byte, swarm.HashSize)
_ = copy(wrappedRef, mockWrappedCh.Address().Bytes())
@@ -164,6 +180,22 @@ func TestFeed_Get(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodGet, feedResource(ownerString, "aabbcc", ""), http.StatusNotFound)
})
+ t.Run("query parameter legacy feed resolve", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ look = newMockLookup(1, 0, nil, errors.New("dummy"), &id{}, &id{})
+ factory = newMockFactory(look)
+ client, _, _, _ = newTestServer(t, testServerOptions{
+ Storer: mockStorer,
+ Feeds: factory,
+ })
+ )
+
+ // Test with the legacyFeed parameter set to true which should add the query parameter
+ jsonhttptest.Request(t, client, http.MethodGet, feedResource(ownerString, "aabbcc", ""), http.StatusNotFound)
+ })
+
t.Run("bigger payload than one chunk", func(t *testing.T) {
t.Parallel()
@@ -202,6 +234,7 @@ func TestFeed_Get(t *testing.T) {
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.SwarmSocSignatureHeader),
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.ContentDispositionHeader),
jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/octet-stream"),
+ jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedResolvedVersionHeader, "v2"),
)
})
@@ -215,6 +248,7 @@ func TestFeed_Get(t *testing.T) {
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.SwarmFeedIndexNextHeader),
jsonhttptest.WithExpectedResponseHeader(api.AccessControlExposeHeaders, api.SwarmSocSignatureHeader),
jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/octet-stream"),
+ jsonhttptest.WithExpectedResponseHeader(api.SwarmFeedResolvedVersionHeader, "v2"),
)
})
})
@@ -357,9 +391,11 @@ func (l *mockLookup) At(_ context.Context, at int64, after uint64) (swarm.Chunk,
// shortcut to ignore the value in the call since time.Now() is a moving target
return l.chunk, l.cur, l.next, nil
}
+
if at == l.at && after == l.after {
return l.chunk, l.cur, l.next, nil
}
+
return nil, nil, nil, errors.New("no feed update found")
}
diff --git a/pkg/api/logger_test.go b/pkg/api/logger_test.go
index 1e13917535f..78caf7078f3 100644
--- a/pkg/api/logger_test.go
+++ b/pkg/api/logger_test.go
@@ -65,8 +65,8 @@ func TestGetLoggers(t *testing.T) {
}
api.ReplaceLogRegistryIterateFn(fn)
- have := make(map[string]interface{})
- want := make(map[string]interface{})
+ have := make(map[string]any)
+ want := make(map[string]any)
data := `{"loggers":[{"id":"b25lWzBdW10-PjgyNDYzNDg2MDM2MA==","logger":"one","subsystem":"one[0][]\u003e\u003e824634860360","verbosity":"all"},{"id":"b25lL25hbWVbMF1bXT4-ODI0NjM0ODYwMzYw","logger":"one/name","subsystem":"one/name[0][]\u003e\u003e824634860360","verbosity":"warning"},{"id":"b25lL25hbWVbMF1bXCJ2YWxcIj0xXT4-ODI0NjM0ODYwMzYw","logger":"one/name","subsystem":"one/name[0][\\\"val\\\"=1]\u003e\u003e824634860360","verbosity":"warning"},{"id":"b25lL25hbWVbMV1bXT4-ODI0NjM0ODYwMzYw","logger":"one/name","subsystem":"one/name[1][]\u003e\u003e824634860360","verbosity":"info"},{"id":"b25lL25hbWVbMl1bXT4-ODI0NjM0ODYwMzYw","logger":"one/name","subsystem":"one/name[2][]\u003e\u003e824634860360","verbosity":"info"}],"tree":{"one":{"+":["all|one[0][]\u003e\u003e824634860360"],"/":{"name":{"+":["warning|one/name[0][]\u003e\u003e824634860360","warning|one/name[0][\\\"val\\\"=1]\u003e\u003e824634860360","info|one/name[1][]\u003e\u003e824634860360","info|one/name[2][]\u003e\u003e824634860360"]}}}}}`
if err := json.Unmarshal([]byte(data), &want); err != nil {
t.Fatalf("unexpected error: %v", err)
diff --git a/pkg/api/peer.go b/pkg/api/peer.go
index 3213b55a8ef..1c918b98fa9 100644
--- a/pkg/api/peer.go
+++ b/pkg/api/peer.go
@@ -31,7 +31,8 @@ func (s *Service) peerConnectHandler(w http.ResponseWriter, r *http.Request) {
return
}
- bzzAddr, err := s.p2p.Connect(r.Context(), paths.MultiAddress)
+ bzzAddr, err := s.p2p.Connect(r.Context(), []multiaddr.Multiaddr{paths.MultiAddress})
+
if err != nil {
logger.Debug("p2p connect failed", "addresses", paths.MultiAddress, "error", err)
logger.Error(nil, "p2p connect failed", "addresses", paths.MultiAddress)
diff --git a/pkg/api/peer_test.go b/pkg/api/peer_test.go
index a9386d8d694..1175e7d6bc4 100644
--- a/pkg/api/peer_test.go
+++ b/pkg/api/peer_test.go
@@ -45,22 +45,23 @@ func TestConnect(t *testing.T) {
t.Fatal(err)
}
- bzzAddress, err := bzz.NewAddress(crypto.NewDefaultSigner(privateKey), underlama, overlay, 0, nil)
+ bzzAddress, err := bzz.NewAddress(crypto.NewDefaultSigner(privateKey), []ma.Multiaddr{underlama}, overlay, 0, nil)
if err != nil {
t.Fatal(err)
}
testServer, _, _, _ := newTestServer(t, testServerOptions{
- P2P: mock.New(mock.WithConnectFunc(func(ctx context.Context, addr ma.Multiaddr) (*bzz.Address, error) {
- if addr.String() == errorUnderlay {
- return nil, testErr
+ P2P: mock.New(mock.WithConnectFunc(func(ctx context.Context, addrs []ma.Multiaddr) (*bzz.Address, error) {
+ for _, addr := range addrs {
+ if addr.String() == errorUnderlay {
+ return nil, testErr
+ }
}
return bzzAddress, nil
})),
})
t.Run("ok", func(t *testing.T) {
- t.Parallel()
jsonhttptest.Request(t, testServer, http.MethodPost, "/connect"+underlay, http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(api.PeerConnectResponse{
Address: overlay.String(),
@@ -69,7 +70,6 @@ func TestConnect(t *testing.T) {
})
t.Run("error", func(t *testing.T) {
- t.Parallel()
jsonhttptest.Request(t, testServer, http.MethodPost, "/connect"+errorUnderlay, http.StatusInternalServerError,
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Code: http.StatusInternalServerError,
@@ -79,11 +79,12 @@ func TestConnect(t *testing.T) {
})
t.Run("error - add peer", func(t *testing.T) {
- t.Parallel()
testServer, _, _, _ := newTestServer(t, testServerOptions{
- P2P: mock.New(mock.WithConnectFunc(func(ctx context.Context, addr ma.Multiaddr) (*bzz.Address, error) {
- if addr.String() == errorUnderlay {
- return nil, testErr
+ P2P: mock.New(mock.WithConnectFunc(func(ctx context.Context, addrs []ma.Multiaddr) (*bzz.Address, error) {
+ for _, addr := range addrs {
+ if addr.String() == errorUnderlay {
+ return nil, testErr
+ }
}
return bzzAddress, nil
})),
diff --git a/pkg/api/postage.go b/pkg/api/postage.go
index 9d6aa2bcc11..d9de74ca354 100644
--- a/pkg/api/postage.go
+++ b/pkg/api/postage.go
@@ -366,9 +366,10 @@ func (s *Service) postageGetStampHandler(w http.ResponseWriter, r *http.Request)
}
type reserveStateResponse struct {
- Radius uint8 `json:"radius"`
- StorageRadius uint8 `json:"storageRadius"`
- Commitment uint64 `json:"commitment"`
+ Radius uint8 `json:"radius"`
+ StorageRadius uint8 `json:"storageRadius"`
+ Commitment uint64 `json:"commitment"`
+ ReserveCapacityDoubling uint8 `json:"reserveCapacityDoubling"`
}
type chainStateResponse struct {
@@ -390,9 +391,10 @@ func (s *Service) reserveStateHandler(w http.ResponseWriter, _ *http.Request) {
}
jsonhttp.OK(w, reserveStateResponse{
- Radius: s.batchStore.Radius(),
- StorageRadius: s.storer.StorageRadius(),
- Commitment: commitment,
+ Radius: s.batchStore.Radius(),
+ StorageRadius: s.storer.StorageRadius(),
+ Commitment: commitment,
+ ReserveCapacityDoubling: s.storer.CapacityDoubling(),
})
}
diff --git a/pkg/api/postage_test.go b/pkg/api/postage_test.go
index 6ba9c812a69..53ac6ccb8ae 100644
--- a/pkg/api/postage_test.go
+++ b/pkg/api/postage_test.go
@@ -374,13 +374,18 @@ func TestReserveState(t *testing.T) {
t.Run("ok", func(t *testing.T) {
t.Parallel()
+ s := mockstorer.New()
+ s.SetStorageRadius(3)
+ s.SetCommittedDepth(5)
ts, _, _, _ := newTestServer(t, testServerOptions{
BatchStore: mock.New(mock.WithRadius(5)),
- Storer: mockstorer.New(),
+ Storer: s,
})
jsonhttptest.Request(t, ts, http.MethodGet, "/reservestate", http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(&api.ReserveStateResponse{
- Radius: 5,
+ Radius: 5,
+ StorageRadius: 3,
+ ReserveCapacityDoubling: 2,
}),
)
})
@@ -697,7 +702,7 @@ func TestPostageAccessHandler(t *testing.T) {
method string
url string
respCode int
- resp interface{}
+ resp any
}
success := []operation{
diff --git a/pkg/api/pss.go b/pkg/api/pss.go
index ef1c3a84d47..03e9182585d 100644
--- a/pkg/api/pss.go
+++ b/pkg/api/pss.go
@@ -24,8 +24,7 @@ import (
)
const (
- writeDeadline = 4 * time.Second // write deadline. should be smaller than the shutdown timeout on api close
- targetMaxLength = 3 // max target length in bytes, in order to prevent grieving by excess computation
+ writeDeadline = 4 * time.Second // write deadline. should be smaller than the shutdown timeout on api close
)
func (s *Service) pssPostHandler(w http.ResponseWriter, r *http.Request) {
@@ -42,7 +41,7 @@ func (s *Service) pssPostHandler(w http.ResponseWriter, r *http.Request) {
topic := pss.NewTopic(paths.Topic)
var targets pss.Targets
- for _, v := range strings.Split(paths.Targets, ",") {
+ for v := range strings.SplitSeq(paths.Targets, ",") {
target := struct {
Val []byte `map:"target" validate:"required,max=3"`
}{}
diff --git a/pkg/api/redistribution_test.go b/pkg/api/redistribution_test.go
index 07b4bc28cfe..eef01977866 100644
--- a/pkg/api/redistribution_test.go
+++ b/pkg/api/redistribution_test.go
@@ -46,8 +46,8 @@ func TestRedistributionStatus(t *testing.T) {
backendmock.WithBalanceAt(func(ctx context.Context, address common.Address, block *big.Int) (*big.Int, error) {
return big.NewInt(100000000), nil
}),
- backendmock.WithSuggestGasPriceFunc(func(ctx context.Context) (*big.Int, error) {
- return big.NewInt(1), nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return big.NewInt(1), big.NewInt(2), nil
}),
},
})
diff --git a/pkg/api/router.go b/pkg/api/router.go
index 714217c9c11..776def9457d 100644
--- a/pkg/api/router.go
+++ b/pkg/api/router.go
@@ -14,6 +14,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/jsonhttp"
"github.com/ethersphere/bee/v2/pkg/log/httpaccess"
"github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/transaction/backendnoop"
"github.com/felixge/fgprof"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
@@ -191,7 +192,7 @@ func (s *Service) checkRouteAvailability(handler http.Handler) http.Handler {
func (s *Service) checkSwapAvailability(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !s.swapEnabled {
- jsonhttp.NotImplemented(w, "Swap is disabled. This endpoint is unavailable.")
+ jsonhttp.Forbidden(w, "Swap is disabled. This endpoint is unavailable.")
return
}
handler.ServeHTTP(w, r)
@@ -201,13 +202,34 @@ func (s *Service) checkSwapAvailability(handler http.Handler) http.Handler {
func (s *Service) checkChequebookAvailability(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !s.chequebookEnabled {
- jsonhttp.NotImplemented(w, "Chequebook is disabled. This endpoint is unavailable.")
+ jsonhttp.Forbidden(w, "Chequebook is disabled. This endpoint is unavailable.")
return
}
handler.ServeHTTP(w, r)
})
}
+func (s *Service) checkStorageIncentivesAvailability(handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if s.redistributionAgent == nil {
+ jsonhttp.Forbidden(w, "Storage incentives are disabled. This endpoint is unavailable.")
+ return
+ }
+ handler.ServeHTTP(w, r)
+ })
+}
+
+func (s *Service) checkChainAvailability(handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if _, ok := s.chainBackend.(*backendnoop.Backend); ok {
+ jsonhttp.Forbidden(w, "Chain is disabled. This endpoint is unavailable.")
+ return
+ }
+
+ handler.ServeHTTP(w, r)
+ })
+}
+
func (s *Service) mountAPI() {
subdomainRouter := s.router.Host("{subdomain:.*}.swarm.localhost").Subrouter()
@@ -554,6 +576,7 @@ func (s *Service) mountBusinessDebug() {
))
handle("/stamps", web.ChainHandlers(
+ s.checkChainAvailability,
s.postageSyncStatusCheckHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampsHandler),
@@ -561,6 +584,7 @@ func (s *Service) mountBusinessDebug() {
)
handle("/stamps/{batch_id}", web.ChainHandlers(
+ s.checkChainAvailability,
s.postageSyncStatusCheckHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampHandler),
@@ -568,6 +592,7 @@ func (s *Service) mountBusinessDebug() {
)
handle("/stamps/{batch_id}/buckets", web.ChainHandlers(
+ s.checkChainAvailability,
s.postageSyncStatusCheckHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampBucketsHandler),
@@ -575,6 +600,7 @@ func (s *Service) mountBusinessDebug() {
)
handle("/stamps/{amount}/{depth}", web.ChainHandlers(
+ s.checkChainAvailability,
s.postageAccessHandler,
s.postageSyncStatusCheckHandler,
s.gasConfigMiddleware("create batch"),
@@ -584,6 +610,7 @@ func (s *Service) mountBusinessDebug() {
)
handle("/stamps/topup/{batch_id}/{amount}", web.ChainHandlers(
+ s.checkChainAvailability,
s.postageAccessHandler,
s.postageSyncStatusCheckHandler,
s.gasConfigMiddleware("topup batch"),
@@ -593,6 +620,7 @@ func (s *Service) mountBusinessDebug() {
)
handle("/stamps/dilute/{batch_id}/{depth}", web.ChainHandlers(
+ s.checkChainAvailability,
s.postageAccessHandler,
s.postageSyncStatusCheckHandler,
s.gasConfigMiddleware("dilute batch"),
@@ -635,9 +663,12 @@ func (s *Service) mountBusinessDebug() {
})),
)
- handle("/redistributionstate", jsonhttp.MethodHandler{
- "GET": http.HandlerFunc(s.redistributionStatusHandler),
- })
+ handle("/redistributionstate", web.ChainHandlers(
+ s.checkStorageIncentivesAvailability,
+ web.FinalHandler(jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.redistributionStatusHandler),
+ })),
+ )
handle("/status", jsonhttp.MethodHandler{
"GET": web.ChainHandlers(
diff --git a/pkg/api/router_test.go b/pkg/api/router_test.go
index 22ba15c8cde..e4cabc8a4c1 100644
--- a/pkg/api/router_test.go
+++ b/pkg/api/router_test.go
@@ -281,17 +281,17 @@ func TestEndpointOptions(t *testing.T) {
{"/consumed", []string{"GET"}, http.StatusNoContent},
{"/consumed/{peer}", []string{"GET"}, http.StatusNoContent},
{"/timesettlements", []string{"GET"}, http.StatusNoContent},
- {"/settlements", nil, http.StatusNotImplemented},
- {"/settlements/{peer}", nil, http.StatusNotImplemented},
- {"/chequebook/cheque/{peer}", nil, http.StatusNotImplemented},
- {"/chequebook/cheque", nil, http.StatusNotImplemented},
- {"/chequebook/cashout/{peer}", nil, http.StatusNotImplemented},
+ {"/settlements", nil, http.StatusForbidden},
+ {"/settlements/{peer}", nil, http.StatusForbidden},
+ {"/chequebook/cheque/{peer}", nil, http.StatusForbidden},
+ {"/chequebook/cheque", nil, http.StatusForbidden},
+ {"/chequebook/cashout/{peer}", nil, http.StatusForbidden},
{"/chequebook/balance", []string{"GET"}, http.StatusNoContent},
{"/chequebook/address", []string{"GET"}, http.StatusNoContent},
{"/chequebook/deposit", []string{"POST"}, http.StatusNoContent},
{"/chequebook/withdraw", []string{"POST"}, http.StatusNoContent},
- {"/wallet", nil, http.StatusNotImplemented},
- {"/wallet/withdraw/{coin}", nil, http.StatusNotImplemented},
+ {"/wallet", nil, http.StatusForbidden},
+ {"/wallet/withdraw/{coin}", nil, http.StatusForbidden},
{"/stamps", []string{"GET"}, http.StatusNoContent},
{"/stamps/{batch_id}", []string{"GET"}, http.StatusNoContent},
{"/stamps/{batch_id}/buckets", []string{"GET"}, http.StatusNoContent},
@@ -381,12 +381,12 @@ func TestEndpointOptions(t *testing.T) {
{"/chequebook/cheque/{peer}", []string{"GET"}, http.StatusNoContent},
{"/chequebook/cheque", []string{"GET"}, http.StatusNoContent},
{"/chequebook/cashout/{peer}", []string{"GET", "POST"}, http.StatusNoContent},
- {"/chequebook/balance", nil, http.StatusNotImplemented},
- {"/chequebook/address", nil, http.StatusNotImplemented},
- {"/chequebook/deposit", nil, http.StatusNotImplemented},
- {"/chequebook/withdraw", nil, http.StatusNotImplemented},
- {"/wallet", nil, http.StatusNotImplemented},
- {"/wallet/withdraw/{coin}", nil, http.StatusNotImplemented},
+ {"/chequebook/balance", nil, http.StatusForbidden},
+ {"/chequebook/address", nil, http.StatusForbidden},
+ {"/chequebook/deposit", nil, http.StatusForbidden},
+ {"/chequebook/withdraw", nil, http.StatusForbidden},
+ {"/wallet", nil, http.StatusForbidden},
+ {"/wallet/withdraw/{coin}", nil, http.StatusForbidden},
{"/stamps", []string{"GET"}, http.StatusNoContent},
{"/stamps/{batch_id}", []string{"GET"}, http.StatusNoContent},
{"/stamps/{batch_id}/buckets", []string{"GET"}, http.StatusNoContent},
diff --git a/pkg/api/soc_test.go b/pkg/api/soc_test.go
index fb34eb82297..b1240708300 100644
--- a/pkg/api/soc_test.go
+++ b/pkg/api/soc_test.go
@@ -95,6 +95,12 @@ func TestSOC(t *testing.T) {
}),
)
+ // Wait for the chanStorer drain goroutine to process the chunk and invoke
+ // the subscriber (which stores it in mockStorer) before issuing GETs.
+ if err := spinlock.Wait(2*time.Second, func() bool { return chanStore.Has(s.Address()) }); err != nil {
+ t.Fatal("timed out waiting for chunk to be stored:", err)
+ }
+
// try to fetch the same chunk
t.Run("chunks fetch", func(t *testing.T) {
rsrc := fmt.Sprintf("/chunks/%s", s.Address().String())
diff --git a/pkg/api/tag.go b/pkg/api/tag.go
index 6a38fd7a65b..67ebf7b7acf 100644
--- a/pkg/api/tag.go
+++ b/pkg/api/tag.go
@@ -19,7 +19,7 @@ import (
)
type tagRequest struct {
- Address swarm.Address `json:"address,omitempty"`
+ Address swarm.Address `json:"address"`
}
type tagResponse struct {
diff --git a/pkg/api/util.go b/pkg/api/util.go
index 6fdb575873a..d8911f4ecf4 100644
--- a/pkg/api/util.go
+++ b/pkg/api/util.go
@@ -132,7 +132,7 @@ var flattenErrorsFormat = func(es []error) string {
//
// In case of parsing error, a new parseError is returned to the caller.
// The caller can use the Unwrap method to get the original error.
-func mapStructure(input, output interface{}, hooks map[string]func(v string) (string, error)) (err error) {
+func mapStructure(input, output any, hooks map[string]func(v string) (string, error)) (err error) {
if input == nil || output == nil {
return nil
}
@@ -210,14 +210,23 @@ func mapStructure(input, output interface{}, hooks map[string]func(v string) (st
case reflect.String:
field.SetString(value)
case reflect.Slice:
- if value == "" {
- return nil // Nil slice.
- }
- val, err := hex.DecodeString(value)
- if err != nil {
- return err
+ switch field.Type() {
+ case reflect.TypeFor[multiaddr.Multiaddr]():
+ val, err := multiaddr.NewMultiaddr(value)
+ if err != nil {
+ return err
+ }
+ field.Set(reflect.ValueOf(val))
+ default:
+ if value == "" {
+ return nil // Nil slice.
+ }
+ val, err := hex.DecodeString(value)
+ if err != nil {
+ return err
+ }
+ field.SetBytes(val)
}
- field.SetBytes(val)
case reflect.Array:
switch field.Interface().(type) {
case common.Hash:
@@ -251,15 +260,6 @@ func mapStructure(input, output interface{}, hooks map[string]func(v string) (st
}
field.Set(reflect.ValueOf(*val))
}
- case reflect.Interface:
- switch field.Type() {
- case reflect.TypeOf((*multiaddr.Multiaddr)(nil)).Elem():
- val, err := multiaddr.NewMultiaddr(value)
- if err != nil {
- return err
- }
- field.Set(reflect.ValueOf(val))
- }
default:
return fmt.Errorf("unsupported type %T", field.Interface())
}
diff --git a/pkg/api/util_test.go b/pkg/api/util_test.go
index 4628ce31270..b35280de4bd 100644
--- a/pkg/api/util_test.go
+++ b/pkg/api/util_test.go
@@ -17,6 +17,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/api"
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/google/go-cmp/cmp"
+ "github.com/multiformats/go-multiaddr"
)
type (
@@ -95,15 +96,24 @@ type (
mapSwarmAddressTest struct {
SwarmAddressVal swarm.Address `map:"swarmAddressVal"`
}
+
+ mapMultiaddrTest struct {
+ MultiaddrVal multiaddr.Multiaddr `map:"multiaddrVal"`
+ }
)
func TestMapStructure(t *testing.T) {
t.Parallel()
+ validMultiaddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/8080")
+ if err != nil {
+ t.Fatal(err)
+ }
+
tests := []struct {
name string
- src interface{}
- want interface{}
+ src any
+ want any
wantErr error
}{{
name: "bool zero value",
@@ -497,6 +507,10 @@ func TestMapStructure(t *testing.T) {
name: "swarm.Address value",
src: map[string]string{"swarmAddressVal": "1234567890abcdef"},
want: &mapSwarmAddressTest{SwarmAddressVal: swarm.MustParseHexAddress("1234567890abcdef")},
+ }, {
+ name: "multiaddr.Multiaddr value",
+ src: map[string]string{"multiaddrVal": "/ip4/127.0.0.1/tcp/8080"},
+ want: &mapMultiaddrTest{MultiaddrVal: validMultiaddr},
}}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
@@ -520,7 +534,7 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) {
t.Run("input is nil", func(t *testing.T) {
t.Parallel()
- var input interface{}
+ var input any
err := api.MapStructure(input, struct{}{}, nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
@@ -541,7 +555,7 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) {
t.Parallel()
var (
- input = map[string]interface{}{"someVal": "123"}
+ input = map[string]any{"someVal": "123"}
output struct {
SomeVal string `map:"someVal"`
}
@@ -556,8 +570,8 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) {
t.Parallel()
var (
- input = map[string]interface{}{"someVal": "123"}
- output interface{}
+ input = map[string]any{"someVal": "123"}
+ output any
)
err := api.MapStructure(&input, output, nil)
if err != nil {
@@ -569,7 +583,7 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) {
t.Parallel()
var (
- input = map[string]interface{}{"someVal": "123"}
+ input = map[string]any{"someVal": "123"}
output = struct {
SomeVal string `map:"someVal"`
}{}
@@ -584,7 +598,7 @@ func TestMapStructure_InputOutputSanityCheck(t *testing.T) {
t.Parallel()
var (
- input = map[string]interface{}{"someVal": "123"}
+ input = map[string]any{"someVal": "123"}
output = "foo"
)
err := api.MapStructure(&input, &output, nil)
diff --git a/pkg/api/validation.go b/pkg/api/validation.go
new file mode 100644
index 00000000000..2e1422ff414
--- /dev/null
+++ b/pkg/api/validation.go
@@ -0,0 +1,34 @@
+// Copyright 2026 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package api
+
+import (
+ "fmt"
+
+ "github.com/ethersphere/bee/v2/pkg/file/redundancy"
+ "github.com/go-playground/validator/v10"
+)
+
+const (
+ RedundancyLevelTag = "rLevel"
+)
+
+// setupValidation configures custom validation rules and their custom error messages.
+func (s *Service) setupValidation() {
+ err := s.validate.RegisterValidation(RedundancyLevelTag, func(fl validator.FieldLevel) bool {
+ level := redundancy.Level(fl.Field().Uint())
+ return level.Validate()
+ })
+ if err != nil {
+ s.logger.Error(err, "failed to register validation")
+ panic(err)
+ }
+
+ s.customValidationMessages = map[string]func(err validator.FieldError) error{
+ RedundancyLevelTag: func(err validator.FieldError) error {
+ return fmt.Errorf("want redundancy level to be between %d and %d", int(redundancy.NONE), int(redundancy.PARANOID))
+ },
+ }
+}
diff --git a/pkg/bitvector/bitvector_test.go b/pkg/bitvector/bitvector_test.go
index d986c5624d8..c2a74e4c67e 100644
--- a/pkg/bitvector/bitvector_test.go
+++ b/pkg/bitvector/bitvector_test.go
@@ -64,7 +64,7 @@ func TestBitvectorGetSet(t *testing.T) {
t.Errorf("error for length %v: %v", length, err)
}
- for i := 0; i < length; i++ {
+ for i := range length {
if bv.Get(i) {
t.Errorf("expected false for element on index %v", i)
}
@@ -79,9 +79,9 @@ func TestBitvectorGetSet(t *testing.T) {
bv.Get(length + 8)
}()
- for i := 0; i < length; i++ {
+ for i := range length {
bv.Set(i)
- for j := 0; j < length; j++ {
+ for j := range length {
if j == i {
if !bv.Get(j) {
t.Errorf("element on index %v is not set to true", i)
diff --git a/pkg/blocker/blocker.go b/pkg/blocker/blocker.go
index 5b916c81732..2d3deb10bf1 100644
--- a/pkg/blocker/blocker.go
+++ b/pkg/blocker/blocker.go
@@ -61,9 +61,7 @@ func New(blocklister p2p.Blocklister, flagTimeout, blockDuration, wakeUpTime tim
blocklistCallback: callback,
}
- b.closeWg.Add(1)
- go func() {
- defer b.closeWg.Done()
+ b.closeWg.Go(func() {
for {
select {
case <-b.quit:
@@ -74,11 +72,9 @@ func New(blocklister p2p.Blocklister, flagTimeout, blockDuration, wakeUpTime tim
}
}
}
- }()
+ })
- b.closeWg.Add(1)
- go func() {
- defer b.closeWg.Done()
+ b.closeWg.Go(func() {
for {
select {
case <-time.After(wakeUpTime):
@@ -87,7 +83,7 @@ func New(blocklister p2p.Blocklister, flagTimeout, blockDuration, wakeUpTime tim
return
}
}
- }()
+ })
return b
}
diff --git a/pkg/blocker/blocker_test.go b/pkg/blocker/blocker_test.go
index d31a815322d..a938204de2d 100644
--- a/pkg/blocker/blocker_test.go
+++ b/pkg/blocker/blocker_test.go
@@ -7,6 +7,7 @@ package blocker_test
import (
"os"
"testing"
+ "testing/synctest"
"time"
"go.uber.org/goleak"
@@ -36,98 +37,98 @@ func TestMain(m *testing.M) {
}
func TestBlocksAfterFlagTimeout(t *testing.T) {
- t.Parallel()
+ synctest.Test(t, func(t *testing.T) {
+ addr := swarm.RandAddress(t)
+ blockedC := make(chan swarm.Address, 10)
- addr := swarm.RandAddress(t)
- blockedC := make(chan swarm.Address, 10)
+ mock := mockBlockLister(func(a swarm.Address, d time.Duration, r string) error {
+ blockedC <- a
- mock := mockBlockLister(func(a swarm.Address, d time.Duration, r string) error {
- blockedC <- a
+ if d != blockTime {
+ t.Fatalf("block time: want %v, got %v", blockTime, d)
+ }
- if d != blockTime {
- t.Fatalf("block time: want %v, got %v", blockTime, d)
- }
-
- return nil
- })
+ return nil
+ })
- b := blocker.New(mock, flagTime, blockTime, time.Millisecond, nil, log.Noop)
- testutil.CleanupCloser(t, b)
+ b := blocker.New(mock, flagTime, blockTime, time.Millisecond, nil, log.Noop)
+ testutil.CleanupCloser(t, b)
- // Flagging address shouldn't block it immediately
- b.Flag(addr)
- if len(blockedC) != 0 {
- t.Fatal("blocker did not wait flag duration")
- }
+ // Flagging address shouldn't block it immediately
+ b.Flag(addr)
+ if len(blockedC) != 0 {
+ t.Fatal("blocker did not wait flag duration")
+ }
- time.Sleep(flagTime / 2)
- b.Flag(addr) // check that this flag call does not override previous call
- if len(blockedC) != 0 {
- t.Fatal("blocker did not wait flag duration")
- }
+ synctest.Wait()
+ b.Flag(addr) // check that this flag call does not override previous call
+ if len(blockedC) != 0 {
+ t.Fatal("blocker did not wait flag duration")
+ }
- // Suspending current goroutine and expect that in this interval
- // block listener was called to block flagged address
- time.Sleep(flagTime * 3)
+ // Suspending current goroutine and expect that in this interval
+ // block listener was called to block flagged address
+ synctest.Wait()
- if a := <-blockedC; !a.Equal(addr) {
- t.Fatalf("expecting flagged address to be blocked")
- }
- if len(blockedC) != 0 {
- t.Fatalf("address should only be blocked once")
- }
+ if a := <-blockedC; !a.Equal(addr) {
+ t.Fatalf("expecting flagged address to be blocked")
+ }
+ if len(blockedC) != 0 {
+ t.Fatalf("address should only be blocked once")
+ }
+ })
}
func TestUnflagBeforeBlock(t *testing.T) {
- t.Parallel()
+ synctest.Test(t, func(t *testing.T) {
+ addr := swarm.RandAddress(t)
+ mock := mockBlockLister(func(a swarm.Address, d time.Duration, r string) error {
+ t.Fatalf("address should not be blocked")
- addr := swarm.RandAddress(t)
- mock := mockBlockLister(func(a swarm.Address, d time.Duration, r string) error {
- t.Fatalf("address should not be blocked")
-
- return nil
- })
+ return nil
+ })
- b := blocker.New(mock, flagTime, blockTime, time.Millisecond, nil, log.Noop)
- testutil.CleanupCloser(t, b)
+ b := blocker.New(mock, flagTime, blockTime, time.Millisecond, nil, log.Noop)
+ testutil.CleanupCloser(t, b)
- // Flagging address shouldn't block it imidietly
- b.Flag(addr)
+ // Flagging address shouldn't block it immediately
+ b.Flag(addr)
- time.Sleep(flagTime / 2)
- b.Flag(addr) // check that this flag call does not override previous call
+ synctest.Wait()
+ b.Flag(addr) // check that this flag call does not override previous call
- b.Unflag(addr)
+ b.Unflag(addr)
- // Suspending current goroutine and expect that in this interval
- // block listener was not called to block flagged address
- time.Sleep(flagTime * 3)
+ // Suspending current goroutine and expect that in this interval
+ // block listener was not called to block flagged address
+ synctest.Wait()
+ })
}
func TestPruneBeforeBlock(t *testing.T) {
- t.Parallel()
-
- addr := swarm.RandAddress(t)
- mock := mockBlockLister(func(a swarm.Address, d time.Duration, r string) error {
- t.Fatalf("address should not be blocked")
+ synctest.Test(t, func(t *testing.T) {
+ addr := swarm.RandAddress(t)
+ mock := mockBlockLister(func(a swarm.Address, d time.Duration, r string) error {
+ t.Fatalf("address should not be blocked")
- return nil
- })
+ return nil
+ })
- b := blocker.New(mock, flagTime, blockTime, time.Millisecond, nil, log.Noop)
- testutil.CleanupCloser(t, b)
+ b := blocker.New(mock, flagTime, blockTime, time.Millisecond, nil, log.Noop)
+ testutil.CleanupCloser(t, b)
- // Flagging address shouldn't block it imidietly
- b.Flag(addr)
+ // Flagging address shouldn't block it immediately
+ b.Flag(addr)
- time.Sleep(flagTime / 2)
+ synctest.Wait()
- // communicate that we have seen no peers, resulting in the peer being removed
- b.PruneUnseen([]swarm.Address{})
+ // communicate that we have seen no peers, resulting in the peer being removed
+ b.PruneUnseen([]swarm.Address{})
- // Suspending current goroutine expect that in this interval
- // block listener was not called to block flagged address
- time.Sleep(flagTime * 3)
+ // Suspending current goroutine expect that in this interval
+ // block listener was not called to block flagged address
+ synctest.Wait()
+ })
}
type blocklister struct {
diff --git a/pkg/bmt/benchmark_test.go b/pkg/bmt/benchmark_test.go
index acf0ab4fc83..aa6cc1703dd 100644
--- a/pkg/bmt/benchmark_test.go
+++ b/pkg/bmt/benchmark_test.go
@@ -47,8 +47,8 @@ func benchmarkSHA3(b *testing.B, n int) {
testData := testutil.RandBytesWithSeed(b, 4096, seed)
b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
+
+ for b.Loop() {
if _, err := bmt.Sha3hash(testData[:n]); err != nil {
b.Fatalf("seed %d: %v", seed, err)
}
@@ -66,10 +66,10 @@ func benchmarkBMTBaseline(b *testing.B, _ int) {
testData := testutil.RandBytesWithSeed(b, 4096, seed)
b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
+
+ for b.Loop() {
eg := new(errgroup.Group)
- for j := 0; j < testSegmentCount; j++ {
+ for range testSegmentCount {
eg.Go(func() error {
_, err := bmt.Sha3hash(testData[:hashSize])
return err
@@ -92,8 +92,8 @@ func benchmarkBMT(b *testing.B, n int) {
defer pool.Put(h)
b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
+
+ for b.Loop() {
if _, err := syncHash(h, testData[:n]); err != nil {
b.Fatalf("seed %d: %v", seed, err)
}
@@ -110,10 +110,10 @@ func benchmarkPool(b *testing.B, poolsize int) {
cycles := 100
b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
+
+ for b.Loop() {
eg := new(errgroup.Group)
- for j := 0; j < cycles; j++ {
+ for range cycles {
eg.Go(func() error {
h := pool.Get()
defer pool.Put(h)
@@ -136,8 +136,8 @@ func benchmarkRefHasher(b *testing.B, n int) {
rbmt := reference.NewRefHasher(swarm.NewHasher(), 128)
b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
+
+ for b.Loop() {
_, err := rbmt.Hash(testData[:n])
if err != nil {
b.Fatal(err)
diff --git a/pkg/bmt/bmt_test.go b/pkg/bmt/bmt_test.go
index 0eefe9fdb5d..c82fb896a6f 100644
--- a/pkg/bmt/bmt_test.go
+++ b/pkg/bmt/bmt_test.go
@@ -129,7 +129,7 @@ func testHasherReuse(t *testing.T, poolsize int) {
h := pool.Get()
defer pool.Put(h)
- for i := 0; i < 100; i++ {
+ for i := range 100 {
seed := int64(i)
testData := testutil.RandBytesWithSeed(t, 4096, seed)
n := rand.Intn(h.Capacity())
@@ -151,7 +151,7 @@ func TestBMTConcurrentUse(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
eg, ectx := errgroup.WithContext(ctx)
- for i := 0; i < cycles; i++ {
+ for range cycles {
eg.Go(func() error {
select {
case <-ectx.Done():
@@ -204,7 +204,7 @@ func TestBMTWriterBuffers(t *testing.T) {
reads := rand.Intn(count*2-1) + 1
offsets := make([]int, reads+1)
- for i := 0; i < reads; i++ {
+ for i := range reads {
offsets[i] = rand.Intn(size) + 1
}
offsets[reads] = size
@@ -235,7 +235,7 @@ func TestBMTWriterBuffers(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
eg, ectx := errgroup.WithContext(ctx)
- for i := 0; i < attempts; i++ {
+ for range attempts {
eg.Go(func() error {
select {
case <-ectx.Done():
diff --git a/pkg/bmt/proof.go b/pkg/bmt/proof.go
index 481a50d87e5..b08017b43cd 100644
--- a/pkg/bmt/proof.go
+++ b/pkg/bmt/proof.go
@@ -20,7 +20,7 @@ type Proof struct {
// Hash overrides base hash function of Hasher to fill buffer with zeros until chunk length
func (p Prover) Hash(b []byte) ([]byte, error) {
for i := p.size; i < p.maxSize; i += len(zerosection) {
- _, err := p.Hasher.Write(zerosection)
+ _, err := p.Write(zerosection)
if err != nil {
return nil, err
}
diff --git a/pkg/bmt/proof_test.go b/pkg/bmt/proof_test.go
index ba1c3e7220c..5b3625acefb 100644
--- a/pkg/bmt/proof_test.go
+++ b/pkg/bmt/proof_test.go
@@ -209,7 +209,7 @@ func TestProof(t *testing.T) {
t.Fatal(err)
}
- for i := 0; i < 128; i++ {
+ for i := range 128 {
t.Run(fmt.Sprintf("segmentIndex %d", i), func(t *testing.T) {
t.Parallel()
diff --git a/pkg/bmt/reference/reference.go b/pkg/bmt/reference/reference.go
index 8b399f8ffc0..416a7d4a354 100644
--- a/pkg/bmt/reference/reference.go
+++ b/pkg/bmt/reference/reference.go
@@ -38,10 +38,7 @@ func NewRefHasher(h hash.Hash, count int) *RefHasher {
func (rh *RefHasher) Hash(data []byte) ([]byte, error) {
// if data is shorter than the base length (maxDataLength), we provide padding with zeros
d := make([]byte, rh.maxDataLength)
- length := len(data)
- if length > rh.maxDataLength {
- length = rh.maxDataLength
- }
+ length := min(len(data), rh.maxDataLength)
copy(d, data[:length])
return rh.hash(d, rh.maxDataLength)
}
diff --git a/pkg/bzz/address.go b/pkg/bzz/address.go
index 42fa8fd90b4..89914ef66bf 100644
--- a/pkg/bzz/address.go
+++ b/pkg/bzz/address.go
@@ -14,6 +14,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "slices"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/bee/v2/pkg/crypto"
@@ -28,7 +29,7 @@ var ErrInvalidAddress = errors.New("invalid address")
// It consists of a peers underlay (physical) address, overlay (topology) address and signature.
// Signature is used to verify the `Overlay/Underlay` pair, as it is based on `underlay|networkID`, signed with the public key of Overlay address
type Address struct {
- Underlay ma.Multiaddr
+ Underlays []ma.Multiaddr
Overlay swarm.Address
Signature []byte
Nonce []byte
@@ -36,25 +37,23 @@ type Address struct {
}
type addressJSON struct {
- Overlay string `json:"overlay"`
- Underlay string `json:"underlay"`
- Signature string `json:"signature"`
- Nonce string `json:"transaction"`
+ Overlay string `json:"overlay"`
+ Underlay string `json:"underlay"` // For backward compatibility
+ Underlays []string `json:"underlays"`
+ Signature string `json:"signature"`
+ Nonce string `json:"transaction"`
}
-func NewAddress(signer crypto.Signer, underlay ma.Multiaddr, overlay swarm.Address, networkID uint64, nonce []byte) (*Address, error) {
- underlayBinary, err := underlay.MarshalBinary()
- if err != nil {
- return nil, err
- }
+func NewAddress(signer crypto.Signer, underlays []ma.Multiaddr, overlay swarm.Address, networkID uint64, nonce []byte) (*Address, error) {
+ underlaysBinary := SerializeUnderlays(underlays)
- signature, err := signer.Sign(generateSignData(underlayBinary, overlay.Bytes(), networkID))
+ signature, err := signer.Sign(generateSignData(underlaysBinary, overlay.Bytes(), networkID))
if err != nil {
return nil, err
}
return &Address{
- Underlay: underlay,
+ Underlays: underlays,
Overlay: overlay,
Signature: signature,
Nonce: nonce,
@@ -77,8 +76,13 @@ func ParseAddress(underlay, overlay, signature, nonce []byte, validateOverlay bo
}
}
- multiUnderlay, err := ma.NewMultiaddrBytes(underlay)
+ multiUnderlays, err := DeserializeUnderlays(underlay)
if err != nil {
+ return nil, fmt.Errorf("deserialize underlays: %w: %w", ErrInvalidAddress, err)
+ }
+
+ if len(multiUnderlays) == 0 {
+ // no underlays sent
return nil, ErrInvalidAddress
}
@@ -88,7 +92,7 @@ func ParseAddress(underlay, overlay, signature, nonce []byte, validateOverlay bo
}
return &Address{
- Underlay: multiUnderlay,
+ Underlays: multiUnderlays,
Overlay: swarm.NewAddress(overlay),
Signature: signature,
Nonce: nonce,
@@ -109,21 +113,49 @@ func (a *Address) Equal(b *Address) bool {
return a == b
}
- return a.Overlay.Equal(b.Overlay) && multiaddrEqual(a.Underlay, b.Underlay) && bytes.Equal(a.Signature, b.Signature) && bytes.Equal(a.Nonce, b.Nonce)
+ return a.Overlay.Equal(b.Overlay) && AreUnderlaysEqual(a.Underlays, b.Underlays) && bytes.Equal(a.Signature, b.Signature) && bytes.Equal(a.Nonce, b.Nonce)
}
-func multiaddrEqual(a, b ma.Multiaddr) bool {
- if a == nil || b == nil {
- return a == b
+func AreUnderlaysEqual(a, b []ma.Multiaddr) bool {
+ if len(a) != len(b) {
+ return false
}
- return a.Equal(b)
+ used := make([]bool, len(b))
+ for i := range len(a) {
+ found := false
+ for j := range len(b) {
+ if used[j] {
+ continue
+ }
+ if a[i].Equal(b[j]) {
+ used[j] = true
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
}
func (a *Address) MarshalJSON() ([]byte, error) {
+ if len(a.Underlays) == 0 {
+ return nil, fmt.Errorf("no underlays for %s", a.Overlay)
+ }
+
+ // select the underlay address for backward compatibility
+ var underlay string
+ if v := SelectBestAdvertisedAddress(a.Underlays, nil); v != nil {
+ underlay = v.String()
+ }
+
return json.Marshal(&addressJSON{
Overlay: a.Overlay.String(),
- Underlay: a.Underlay.String(),
+ Underlay: underlay,
+ Underlays: a.underlaysAsStrings(),
Signature: base64.StdEncoding.EncodeToString(a.Signature),
Nonce: common.Bytes2Hex(a.Nonce),
})
@@ -143,23 +175,48 @@ func (a *Address) UnmarshalJSON(b []byte) error {
a.Overlay = addr
- m, err := ma.NewMultiaddr(v.Underlay)
+ // append the underlay for backward compatibility
+ if !slices.Contains(v.Underlays, v.Underlay) {
+ v.Underlays = append(v.Underlays, v.Underlay)
+ }
+
+ multiaddrs, err := parseMultiaddrs(v.Underlays)
if err != nil {
return err
}
- a.Underlay = m
+ a.Underlays = multiaddrs
a.Signature, err = base64.StdEncoding.DecodeString(v.Signature)
a.Nonce = common.Hex2Bytes(v.Nonce)
return err
}
func (a *Address) String() string {
- return fmt.Sprintf("[Underlay: %v, Overlay %v, Signature %x, Transaction %x]", a.Underlay, a.Overlay, a.Signature, a.Nonce)
+ return fmt.Sprintf("[Underlay: %v, Overlay %v, Signature %x, Transaction %x]", a.underlaysAsStrings(), a.Overlay, a.Signature, a.Nonce)
}
// ShortString returns shortened versions of bzz address in a format: [Overlay, Underlay]
// It can be used for logging
func (a *Address) ShortString() string {
- return fmt.Sprintf("[Overlay: %s, Underlay: %s]", a.Overlay.String(), a.Underlay.String())
+ return fmt.Sprintf("[Overlay: %s, Underlays: %v]", a.Overlay.String(), a.underlaysAsStrings())
+}
+
+func (a *Address) underlaysAsStrings() []string {
+ underlays := make([]string, len(a.Underlays))
+ for i, underlay := range a.Underlays {
+ underlays[i] = underlay.String()
+ }
+ return underlays
+}
+
+func parseMultiaddrs(addrs []string) ([]ma.Multiaddr, error) {
+ multiAddrs := make([]ma.Multiaddr, len(addrs))
+ for i, addr := range addrs {
+ multiAddr, err := ma.NewMultiaddr(addr)
+ if err != nil {
+ return nil, err
+ }
+ multiAddrs[i] = multiAddr
+ }
+ return multiAddrs, nil
}
diff --git a/pkg/bzz/address_test.go b/pkg/bzz/address_test.go
index 969216d29d7..8d038010e25 100644
--- a/pkg/bzz/address_test.go
+++ b/pkg/bzz/address_test.go
@@ -11,13 +11,13 @@ import (
"github.com/ethersphere/bee/v2/pkg/bzz"
"github.com/ethersphere/bee/v2/pkg/crypto"
- ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multiaddr"
)
func TestBzzAddress(t *testing.T) {
t.Parallel()
- node1ma, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1634/p2p/16Uiu2HAkx8ULY8cTXhdVAcMmLcH9AsTKz6uBQ7DPLKRjMLgBVYkA")
+ node1ma, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/1634/p2p/16Uiu2HAkx8ULY8cTXhdVAcMmLcH9AsTKz6uBQ7DPLKRjMLgBVYkA")
if err != nil {
t.Fatal(err)
}
@@ -35,7 +35,7 @@ func TestBzzAddress(t *testing.T) {
}
signer1 := crypto.NewDefaultSigner(privateKey1)
- bzzAddress, err := bzz.NewAddress(signer1, node1ma, overlay, 3, nonce)
+ bzzAddress, err := bzz.NewAddress(signer1, []multiaddr.Multiaddr{node1ma}, overlay, 3, nonce)
if err != nil {
t.Fatal(err)
}
@@ -63,3 +63,113 @@ func TestBzzAddress(t *testing.T) {
t.Fatalf("got %s expected %s", newbzz, bzzAddress)
}
}
+
+func TestAreUnderlaysEqual(t *testing.T) {
+ // --- Test Data Initialization ---
+ addr1 := mustNewMultiaddr(t, "/ip4/127.0.0.1/tcp/8001")
+ addr2 := mustNewMultiaddr(t, "/ip4/192.168.1.1/tcp/8002")
+ addr3 := mustNewMultiaddr(t, "/ip6/::1/udp/9000")
+ addr4 := mustNewMultiaddr(t, "/ip4/127.0.0.1/tcp/8001") // Identical to addr1
+
+ // --- Test Cases Definition ---
+ testCases := []struct {
+ name string
+ a []multiaddr.Multiaddr
+ b []multiaddr.Multiaddr
+ want bool
+ }{
+ {
+ name: "two nil slices",
+ a: nil,
+ b: nil,
+ want: true,
+ },
+ {
+ name: "one nil and one empty slice",
+ a: nil,
+ b: []multiaddr.Multiaddr{},
+ want: true,
+ },
+ {
+ name: "one empty and one nil slice",
+ a: []multiaddr.Multiaddr{},
+ b: nil,
+ want: true,
+ },
+ {
+ name: "two empty slices",
+ a: []multiaddr.Multiaddr{},
+ b: []multiaddr.Multiaddr{},
+ want: true,
+ },
+ {
+ name: "equal slices with same order",
+ a: []multiaddr.Multiaddr{addr1, addr2},
+ b: []multiaddr.Multiaddr{addr1, addr2},
+ want: true,
+ },
+ {
+ name: "equal slices with different order",
+ a: []multiaddr.Multiaddr{addr1, addr2, addr3},
+ b: []multiaddr.Multiaddr{addr3, addr1, addr2},
+ want: true,
+ },
+ {
+ name: "equal slices with identical (but not same instance) values",
+ a: []multiaddr.Multiaddr{addr1, addr2},
+ b: []multiaddr.Multiaddr{addr4, addr2},
+ want: true,
+ },
+ {
+ name: "slices with different lengths (a < b)",
+ a: []multiaddr.Multiaddr{addr1},
+ b: []multiaddr.Multiaddr{addr1, addr2},
+ want: false,
+ },
+ {
+ name: "slices with different lengths (b < a)",
+ a: []multiaddr.Multiaddr{addr1, addr2},
+ b: []multiaddr.Multiaddr{addr1},
+ want: false,
+ },
+ {
+ name: "slices with same length but different elements",
+ a: []multiaddr.Multiaddr{addr1, addr2},
+ b: []multiaddr.Multiaddr{addr1, addr3},
+ want: false,
+ },
+ {
+ name: "one slice is nil",
+ a: []multiaddr.Multiaddr{addr1},
+ b: nil,
+ want: false,
+ },
+ {
+ name: "slices with duplicates, equal",
+ a: []multiaddr.Multiaddr{addr1, addr2, addr1},
+ b: []multiaddr.Multiaddr{addr1, addr1, addr2},
+ want: true,
+ },
+ {
+ name: "slices with duplicates, not equal",
+ a: []multiaddr.Multiaddr{addr1, addr2, addr3},
+ b: []multiaddr.Multiaddr{addr1, addr1, addr2},
+ want: false,
+ },
+ {
+ name: "slices with different duplicates",
+ a: []multiaddr.Multiaddr{addr1, addr1, addr2},
+ b: []multiaddr.Multiaddr{addr1, addr2, addr2},
+ want: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ got := bzz.AreUnderlaysEqual(tc.a, tc.b)
+ if got != tc.want {
+ t.Errorf("AreUnderlaysEqual() = %v, want %v", got, tc.want)
+ }
+ })
+ }
+}
diff --git a/pkg/bzz/export_test.go b/pkg/bzz/export_test.go
new file mode 100644
index 00000000000..531121edc4f
--- /dev/null
+++ b/pkg/bzz/export_test.go
@@ -0,0 +1,7 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bzz
+
+const UnderlayListPrefix = underlayListPrefix
diff --git a/pkg/bzz/transport.go b/pkg/bzz/transport.go
new file mode 100644
index 00000000000..579a1807936
--- /dev/null
+++ b/pkg/bzz/transport.go
@@ -0,0 +1,108 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bzz
+
+import (
+ "sort"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// TransportType represents the transport protocol of a multiaddress.
+type TransportType int
+
+const (
+ // TransportUnknown indicates an unrecognized transport.
+ TransportUnknown TransportType = iota
+ // TransportTCP indicates plain TCP without WebSocket.
+ TransportTCP
+ // TransportWS indicates WebSocket without TLS.
+ TransportWS
+ // TransportWSS indicates WebSocket with TLS (secure).
+ TransportWSS
+)
+
+// String returns a string representation of the transport type.
+func (t TransportType) String() string {
+ switch t {
+ case TransportTCP:
+ return "tcp"
+ case TransportWS:
+ return "ws"
+ case TransportWSS:
+ return "wss"
+ default:
+ return "unknown"
+ }
+}
+
+// Priority returns the sorting priority for the transport type.
+// Lower value = higher priority: TCP (0) > WS (1) > WSS (2) > Unknown (3)
+func (t TransportType) Priority() int {
+ switch t {
+ case TransportTCP:
+ return 0
+ case TransportWS:
+ return 1
+ case TransportWSS:
+ return 2
+ default:
+ return 3
+ }
+}
+
+// ClassifyTransport returns the transport type of a multiaddress.
+// It distinguishes between plain TCP, WebSocket (WS), and secure WebSocket (WSS).
+func ClassifyTransport(addr ma.Multiaddr) TransportType {
+ if addr == nil {
+ return TransportUnknown
+ }
+
+ hasProtocol := func(p int) bool {
+ _, err := addr.ValueForProtocol(p)
+ return err == nil
+ }
+
+ hasWS := hasProtocol(ma.P_WS)
+ hasTLS := hasProtocol(ma.P_TLS)
+ hasTCP := hasProtocol(ma.P_TCP)
+
+ switch {
+ case hasWS && hasTLS:
+ return TransportWSS
+ case hasWS:
+ return TransportWS
+ case hasTCP:
+ return TransportTCP
+ default:
+ return TransportUnknown
+ }
+}
+
+func SelectBestAdvertisedAddress(addrs []ma.Multiaddr, fallback ma.Multiaddr) ma.Multiaddr {
+ if len(addrs) == 0 {
+ return fallback
+ }
+
+ // Sort addresses: first by transport priority (TCP > WS > WSS), preserving relative order
+ sort.SliceStable(addrs, func(i, j int) bool {
+ return ClassifyTransport(addrs[i]).Priority() < ClassifyTransport(addrs[j]).Priority()
+ })
+
+ for _, addr := range addrs {
+ if manet.IsPublicAddr(addr) {
+ return addr
+ }
+ }
+
+ for _, addr := range addrs {
+ if !manet.IsPrivateAddr(addr) {
+ return addr
+ }
+ }
+
+ return addrs[0]
+}
diff --git a/pkg/bzz/transport_test.go b/pkg/bzz/transport_test.go
new file mode 100644
index 00000000000..fab5f90246f
--- /dev/null
+++ b/pkg/bzz/transport_test.go
@@ -0,0 +1,338 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bzz_test
+
+import (
+ "testing"
+
+ "github.com/ethersphere/bee/v2/pkg/bzz"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+func TestSelectBestAdvertisedAddress(t *testing.T) {
+ t.Parallel()
+
+ mustMultiaddr := func(s string) multiaddr.Multiaddr {
+ addr, err := multiaddr.NewMultiaddr(s)
+ if err != nil {
+ t.Fatalf("failed to create multiaddr %s: %v", s, err)
+ }
+ return addr
+ }
+
+ tests := []struct {
+ name string
+ addrs []multiaddr.Multiaddr
+ fallback multiaddr.Multiaddr
+ expected multiaddr.Multiaddr
+ }{
+ {
+ name: "empty addresses returns fallback",
+ addrs: []multiaddr.Multiaddr{},
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/8080"),
+ expected: mustMultiaddr("/ip4/127.0.0.1/tcp/8080"),
+ },
+ {
+ name: "nil addresses returns fallback",
+ addrs: nil,
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/8080"),
+ expected: mustMultiaddr("/ip4/127.0.0.1/tcp/8080"),
+ },
+ {
+ name: "prefers public addresses",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/192.168.1.1/tcp/8080"), // private
+ mustMultiaddr("/ip4/8.8.8.8/tcp/8080"), // public
+ mustMultiaddr("/ip4/10.0.0.1/tcp/8080"), // private
+ },
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/8080"),
+ expected: mustMultiaddr("/ip4/8.8.8.8/tcp/8080"),
+ },
+ {
+ name: "prefers first public address when multiple exist",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/192.168.1.1/tcp/8080"), // private
+ mustMultiaddr("/ip4/8.8.8.8/tcp/8080"), // public
+ mustMultiaddr("/ip4/1.1.1.1/tcp/8080"), // public
+ },
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/8080"),
+ expected: mustMultiaddr("/ip4/8.8.8.8/tcp/8080"),
+ },
+ {
+ name: "prefers non-private when no public addresses",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/127.0.0.1/tcp/8080"), // loopback
+ mustMultiaddr("/ip4/192.168.1.1/tcp/8080"), // private but not loopback
+ mustMultiaddr("/ip4/10.0.0.1/tcp/8080"), // private but not loopback
+ },
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/9999"),
+ expected: mustMultiaddr("/ip4/127.0.0.1/tcp/8080"),
+ },
+ {
+ name: "returns first address when all are loopback",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/127.0.0.1/tcp/8080"),
+ mustMultiaddr("/ip4/127.0.0.1/tcp/8081"),
+ mustMultiaddr("/ip6/::1/tcp/8080"),
+ },
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/9999"),
+ expected: mustMultiaddr("/ip4/127.0.0.1/tcp/8080"),
+ },
+ {
+ name: "sorts TCP addresses first",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/192.168.1.1/udp/8080"), // UDP
+ mustMultiaddr("/ip4/1.1.1.1/udp/8080"), // UDP public
+ mustMultiaddr("/ip4/8.8.8.8/tcp/8080"), // TCP public
+ },
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/9999"),
+ expected: mustMultiaddr("/ip4/8.8.8.8/tcp/8080"),
+ },
+ {
+ name: "handles IPv6 addresses",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip6/::1/tcp/8080"), // loopback
+ mustMultiaddr("/ip6/2001:db8::1/tcp/8080"), // public IPv6
+ mustMultiaddr("/ip4/192.168.1.1/tcp/8080"), // private IPv4
+ },
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/9999"),
+ expected: mustMultiaddr("/ip6/2001:db8::1/tcp/8080"),
+ },
+ {
+ name: "handles mixed protocols with preference order",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/192.168.1.1/udp/8080"), // private UDP
+ mustMultiaddr("/ip4/192.168.1.2/tcp/8080"), // private TCP
+ mustMultiaddr("/ip4/127.0.0.1/tcp/8080"), // loopback TCP
+ },
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/9999"),
+ expected: mustMultiaddr("/ip4/192.168.1.2/tcp/8080"), // first TCP, and it's non-loopback
+ },
+ {
+ name: "single address",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/192.168.1.1/tcp/8080"),
+ },
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/9999"),
+ expected: mustMultiaddr("/ip4/192.168.1.1/tcp/8080"),
+ },
+ {
+ name: "websocket addresses",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/127.0.0.1/tcp/8080/ws"),
+ mustMultiaddr("/ip4/8.8.8.8/tcp/8080/ws"), // public with websocket
+ },
+ fallback: mustMultiaddr("/ip4/127.0.0.1/tcp/9999"),
+ expected: mustMultiaddr("/ip4/8.8.8.8/tcp/8080/ws"),
+ },
+ // Full underlay addresses tests
+ {
+ name: "full underlay addresses: prefers public TCP over private TCP",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/10.233.99.120/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // private
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // public
+ mustMultiaddr("/ip4/127.0.0.1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // loopback
+ },
+ fallback: nil,
+ expected: mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ {
+ name: "full underlay addresses: prefers public TCP over WSS addresses",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/10.233.99.120/tcp/1635/tls/sni/10-233-99-120.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // private WSS
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // public TCP
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // public WSS
+ },
+ fallback: nil,
+ expected: mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ {
+ name: "full underlay addresses: full node underlay list selects public TCP",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/10.233.99.120/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip4/10.233.99.120/tcp/1635/tls/sni/10-233-99-120.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip4/127.0.0.1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip4/127.0.0.1/tcp/1635/tls/sni/127-0-0-1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip6/::1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip6/::1/tcp/1635/tls/sni/0--1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ fallback: nil,
+ expected: mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ {
+ name: "full underlay addresses: WSS only list selects public WSS",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/10.233.99.120/tcp/1635/tls/sni/10-233-99-120.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip4/127.0.0.1/tcp/1635/tls/sni/127-0-0-1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip6/::1/tcp/1635/tls/sni/0--1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ fallback: nil,
+ expected: mustMultiaddr("/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ {
+ name: "full underlay addresses: TCP only list selects public TCP",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/10.233.99.120/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip4/127.0.0.1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip6/::1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ fallback: nil,
+ expected: mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ {
+ name: "full underlay addresses: IPv6 loopback list with no public addresses",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip6/::1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ mustMultiaddr("/ip6/::1/tcp/1635/tls/sni/0--1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ fallback: nil,
+ expected: mustMultiaddr("/ip6/::1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ {
+ name: "full underlay addresses: WSS before TCP in input - TCP is still selected due to transport priority",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // public WSS (first in input)
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // public TCP (second in input)
+ },
+ fallback: nil,
+ // TCP is selected because:
+ // 1. Transport priority: TCP (0) > WS (1) > WSS (2)
+ // 2. Sorting puts TCP addresses before WSS addresses
+ // 3. Both are public, so the first public after sorting (TCP) is returned
+ expected: mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ {
+ name: "full underlay addresses: TCP before WSS in input - TCP is selected",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // public TCP (first)
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // public WSS (second)
+ },
+ fallback: nil,
+ // TCP is selected because it has higher transport priority
+ expected: mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ {
+ name: "full underlay addresses: private TCP vs public WSS - public WSS is selected",
+ addrs: []multiaddr.Multiaddr{
+ mustMultiaddr("/ip4/10.233.99.120/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // private TCP
+ mustMultiaddr("/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"), // public WSS
+ },
+ fallback: nil,
+ // Public WSS is selected because there is no public TCP address
+ // Priority: public addresses > private addresses, then by transport type
+ expected: mustMultiaddr("/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := bzz.SelectBestAdvertisedAddress(tt.addrs, tt.fallback)
+ if !result.Equal(tt.expected) {
+ t.Errorf("SelectBestAdvertisedAddress() = %v, want %v", result, tt.expected)
+ }
+ })
+ }
+}
+
+func TestClassifyTransport(t *testing.T) {
+ t.Parallel()
+
+ mustMultiaddr := func(s string) multiaddr.Multiaddr {
+ addr, err := multiaddr.NewMultiaddr(s)
+ if err != nil {
+ t.Fatalf("failed to create multiaddr %s: %v", s, err)
+ }
+ return addr
+ }
+
+ tests := []struct {
+ name string
+ addr multiaddr.Multiaddr
+ expected bzz.TransportType
+ }{
+ {
+ name: "plain TCP address",
+ addr: mustMultiaddr("/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ expected: bzz.TransportTCP,
+ },
+ {
+ name: "plain TCP IPv6 address",
+ addr: mustMultiaddr("/ip6/::1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ expected: bzz.TransportTCP,
+ },
+ {
+ name: "plain WS address",
+ addr: mustMultiaddr("/ip4/127.0.0.1/tcp/8080/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ expected: bzz.TransportWS,
+ },
+ {
+ name: "WSS address with TLS and SNI",
+ addr: mustMultiaddr("/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ expected: bzz.TransportWSS,
+ },
+ {
+ name: "WSS IPv6 address",
+ addr: mustMultiaddr("/ip6/::1/tcp/1635/tls/sni/0--1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"),
+ expected: bzz.TransportWSS,
+ },
+ {
+ name: "UDP address returns unknown",
+ addr: mustMultiaddr("/ip4/127.0.0.1/udp/8080"),
+ expected: bzz.TransportUnknown,
+ },
+ {
+ name: "nil address returns unknown",
+ addr: nil,
+ expected: bzz.TransportUnknown,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := bzz.ClassifyTransport(tt.addr)
+ if result != tt.expected {
+ t.Errorf("ClassifyTransport() = %v (%s), want %v (%s)", result, result.String(), tt.expected, tt.expected.String())
+ }
+ })
+ }
+}
+
+func TestTransportTypePriority(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ transport bzz.TransportType
+ priority int
+ }{
+ {bzz.TransportTCP, 0},
+ {bzz.TransportWS, 1},
+ {bzz.TransportWSS, 2},
+ {bzz.TransportUnknown, 3},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.transport.String(), func(t *testing.T) {
+ if got := tt.transport.Priority(); got != tt.priority {
+ t.Errorf("TransportType(%v).Priority() = %d, want %d", tt.transport, got, tt.priority)
+ }
+ })
+ }
+
+ // Verify priority ordering: TCP < WS < WSS < Unknown
+ if bzz.TransportTCP.Priority() >= bzz.TransportWS.Priority() {
+ t.Error("TCP priority should be lower (better) than WS")
+ }
+ if bzz.TransportWS.Priority() >= bzz.TransportWSS.Priority() {
+ t.Error("WS priority should be lower (better) than WSS")
+ }
+ if bzz.TransportWSS.Priority() >= bzz.TransportUnknown.Priority() {
+ t.Error("WSS priority should be lower (better) than Unknown")
+ }
+}
diff --git a/pkg/bzz/underlay.go b/pkg/bzz/underlay.go
new file mode 100644
index 00000000000..fdddf21a404
--- /dev/null
+++ b/pkg/bzz/underlay.go
@@ -0,0 +1,98 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bzz
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-varint"
+)
+
+// underlayListPrefix is a magic byte designated for identifying a serialized list of multiaddrs.
+// A value of 0x99 (153) was chosen as it is not a defined multiaddr protocol code.
+// This ensures that a failure is triggered by the original multiaddr.NewMultiaddrBytes function,
+// which expects a valid protocol code at the start of the data.
+const underlayListPrefix byte = 0x99
+
+// SerializeUnderlays serializes a slice of multiaddrs into a single byte slice.
+// If the slice contains exactly one address, the standard, backward-compatible
+// multiaddr format is used. For zero or more than one address, a custom list format
+// prefixed with a magic byte is utilized.
+func SerializeUnderlays(addrs []multiaddr.Multiaddr) []byte {
+ // Backward compatibility if exactly one address is present.
+ if len(addrs) == 1 {
+ return addrs[0].Bytes()
+ }
+
+ // For 0 or 2+ addresses, the custom list format with the prefix is used.
+ // The format is: [prefix_byte][varint_len_1][addr_1_bytes]...
+ var buf bytes.Buffer
+ buf.WriteByte(underlayListPrefix)
+
+ for _, addr := range addrs {
+ addrBytes := addr.Bytes()
+ buf.Write(varint.ToUvarint(uint64(len(addrBytes))))
+ buf.Write(addrBytes)
+ }
+ return buf.Bytes()
+}
+
+// DeserializeUnderlays deserializes a byte slice into a slice of multiaddrs.
+// The data format is automatically detected as either a single legacy multiaddr
+// or a list of multiaddrs (identified by underlayListPrefix), and is parsed accordingly.
+func DeserializeUnderlays(data []byte) ([]multiaddr.Multiaddr, error) {
+ if len(data) == 0 {
+ return nil, errors.New("cannot deserialize empty byte slice")
+ }
+
+ // If the data begins with the magic prefix, it is handled as a list.
+ if data[0] == underlayListPrefix {
+ return deserializeList(data[1:])
+ }
+
+ // Otherwise, the data is handled as a single, backward-compatible multiaddr.
+ addr, err := multiaddr.NewMultiaddrBytes(data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse as single multiaddr: %w", err)
+ }
+ // The result is returned as a single-element slice for a consistent return type.
+ return []multiaddr.Multiaddr{addr}, nil
+}
+
+// deserializeList handles the parsing of the custom list format.
+// The provided data is expected to have already been stripped of the underlayListPrefix.
+func deserializeList(data []byte) ([]multiaddr.Multiaddr, error) {
+ var addrs []multiaddr.Multiaddr
+ r := bytes.NewReader(data)
+
+ for r.Len() > 0 {
+ // The varint-encoded length of the next address is read.
+ addrLen, err := varint.ReadUvarint(r)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read address length from list: %w", err)
+ }
+
+ // A sanity check is performed to ensure enough bytes remain for the declared length.
+ if uint64(r.Len()) < addrLen {
+ return nil, fmt.Errorf("inconsistent data: expected %d bytes for address, but only %d remain", addrLen, r.Len())
+ }
+
+ // The individual address bytes are read and parsed.
+ addrBytes := make([]byte, addrLen)
+ if _, err := r.Read(addrBytes); err != nil {
+ return nil, fmt.Errorf("failed to read address bytes: %w", err)
+ }
+
+ addr, err := multiaddr.NewMultiaddrBytes(addrBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse multiaddr from list: %w", err)
+ }
+ addrs = append(addrs, addr)
+ }
+ return addrs, nil
+}
diff --git a/pkg/bzz/underlay_test.go b/pkg/bzz/underlay_test.go
new file mode 100644
index 00000000000..5bc4e34c19d
--- /dev/null
+++ b/pkg/bzz/underlay_test.go
@@ -0,0 +1,252 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bzz_test
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+
+ "github.com/ethersphere/bee/v2/pkg/bzz"
+ "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-varint"
+)
+
+func TestSerializeUnderlays(t *testing.T) {
+ ip4TCPAddr := mustNewMultiaddr(t, "/ip4/127.0.0.1/tcp/80/p2p/QmWqeeHEqG2db37JsuKUxyJ2JF8LtVJMGohKVT8h3aeCVH")
+ p2pAddr := mustNewMultiaddr(t, "/ip4/65.108.66.216/tcp/16341/p2p/QmVuCJ3M96c7vwv4MQBv7WY1HWQacyCEHvM99R8MUDj95d")
+ wssAddr := mustNewMultiaddr(t, "/ip4/127.0.0.1/tcp/443/wss/p2p/QmWqeeHEqG2db37JsuKUxyJ2JF8LtVJMGohKVT8h3aeCVH")
+ dnsSwarmAddr := mustNewMultiaddr(t, "/dnsaddr/mainnet.ethswarm.org")
+
+ t.Run("multiple addresses list", func(t *testing.T) {
+ addrs := []multiaddr.Multiaddr{ip4TCPAddr, p2pAddr, wssAddr, dnsSwarmAddr}
+ serialized := bzz.SerializeUnderlays(addrs)
+
+ if serialized[0] != bzz.UnderlayListPrefix {
+ t.Errorf("expected prefix %x for multiple addresses, got %x", bzz.UnderlayListPrefix, serialized[0])
+ }
+ })
+
+ t.Run("single address list", func(t *testing.T) {
+ addrs := []multiaddr.Multiaddr{dnsSwarmAddr}
+ serialized := bzz.SerializeUnderlays(addrs)
+ expected := dnsSwarmAddr.Bytes() // Should be legacy format without prefix
+
+ if !bytes.Equal(serialized, expected) {
+ t.Errorf("expected single address to serialize to legacy format %x, got %x", expected, serialized)
+ }
+ if serialized[0] == bzz.UnderlayListPrefix {
+ t.Error("single address serialization should not have the list prefix")
+ }
+ })
+
+ t.Run("empty list", func(t *testing.T) {
+ addrs := []multiaddr.Multiaddr{}
+ serialized := bzz.SerializeUnderlays(addrs)
+ expected := []byte{bzz.UnderlayListPrefix}
+ if !bytes.Equal(serialized, expected) {
+ t.Errorf("expected %x for empty list, got %x", expected, serialized)
+ }
+ })
+
+ t.Run("nil list", func(t *testing.T) {
+ var addrs []multiaddr.Multiaddr = nil
+ serialized := bzz.SerializeUnderlays(addrs)
+ expected := []byte{bzz.UnderlayListPrefix}
+ if !bytes.Equal(serialized, expected) {
+ t.Errorf("expected %x for nil list, got %x", expected, serialized)
+ }
+ })
+}
+
+func TestDeserializeUnderlays(t *testing.T) {
+	ip4TCPAddr := mustNewMultiaddr(t, "/ip4/127.0.0.1/tcp/80/p2p/QmWqeeHEqG2db37JsuKUxyJ2JF8LtVJMGohKVT8h3aeCVH")
+	p2pAddr := mustNewMultiaddr(t, "/ip4/65.108.66.216/tcp/16341/p2p/QmVuCJ3M96c7vwv4MQBv7WY1HWQacyCEHvM99R8MUDj95d")
+	wssAddr := mustNewMultiaddr(t, "/ip4/127.0.0.1/tcp/443/wss/p2p/QmWqeeHEqG2db37JsuKUxyJ2JF8LtVJMGohKVT8h3aeCVH")
+
+	// roundTripEmpty serializes the given (empty or nil) address list and
+	// verifies that it encodes to a bare prefix and decodes back to nothing.
+	roundTripEmpty := func(t *testing.T, addrs []multiaddr.Multiaddr) {
+		t.Helper()
+		packed := bzz.SerializeUnderlays(addrs)
+		if !bytes.Equal(packed, []byte{bzz.UnderlayListPrefix}) {
+			t.Errorf("expected %v, got %v", []byte{bzz.UnderlayListPrefix}, packed)
+		}
+		got, err := bzz.DeserializeUnderlays(packed)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		if len(got) != 0 {
+			t.Errorf("expected empty slice, got %v", got)
+		}
+	}
+
+	t.Run("valid list of multiple", func(t *testing.T) {
+		want := []multiaddr.Multiaddr{ip4TCPAddr, p2pAddr, wssAddr}
+		got, err := bzz.DeserializeUnderlays(bzz.SerializeUnderlays(want))
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		if !reflect.DeepEqual(want, got) {
+			t.Errorf("expected %v, got %v", want, got)
+		}
+	})
+
+	t.Run("single legacy multiaddr", func(t *testing.T) {
+		got, err := bzz.DeserializeUnderlays(wssAddr.Bytes())
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		if len(got) != 1 || !got[0].Equal(wssAddr) {
+			t.Errorf("expected [%v], got %v", wssAddr, got)
+		}
+	})
+
+	t.Run("empty byte slice", func(t *testing.T) {
+		if _, err := bzz.DeserializeUnderlays([]byte{}); err == nil {
+			t.Error("expected an error for empty slice, but got nil")
+		}
+	})
+
+	t.Run("list with only prefix", func(t *testing.T) {
+		got, err := bzz.DeserializeUnderlays([]byte{bzz.UnderlayListPrefix})
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		if len(got) != 0 {
+			t.Errorf("expected empty slice, got %v", got)
+		}
+	})
+
+	t.Run("serialize deserialize empty list", func(t *testing.T) {
+		roundTripEmpty(t, []multiaddr.Multiaddr{})
+	})
+
+	t.Run("serialize deserialize nil list", func(t *testing.T) {
+		roundTripEmpty(t, nil)
+	})
+
+	t.Run("corrupted list - length too long", func(t *testing.T) {
+		maBytes := ip4TCPAddr.Bytes()
+		var payload bytes.Buffer
+		payload.WriteByte(bzz.UnderlayListPrefix)
+		payload.Write(varint.ToUvarint(uint64(len(maBytes) + 5))) // declared length exceeds the bytes actually present
+		payload.Write(maBytes)
+
+		if _, err := bzz.DeserializeUnderlays(payload.Bytes()); err == nil {
+			t.Error("expected an error for corrupted data, but got nil")
+		}
+	})
+
+	t.Run("corrupted list - invalid multiaddr bytes", func(t *testing.T) {
+		junk := []byte{0xde, 0xad, 0xbe, 0xef}
+		var payload bytes.Buffer
+		payload.WriteByte(bzz.UnderlayListPrefix)
+		payload.Write(varint.ToUvarint(uint64(len(junk))))
+		payload.Write(junk)
+
+		if _, err := bzz.DeserializeUnderlays(payload.Bytes()); err == nil {
+			t.Error("expected an error for invalid multiaddr bytes, but got nil")
+		}
+	})
+}
+
+func TestSerializeUnderlaysDeserializeUnderlays(t *testing.T) {
+	ip4TCPAddr := mustNewMultiaddr(t, "/ip4/127.0.0.1/tcp/80/p2p/QmWqeeHEqG2db37JsuKUxyJ2JF8LtVJMGohKVT8h3aeCVH")
+	dnsSwarmAddr := mustNewMultiaddr(t, "/dnsaddr/mainnet.ethswarm.org")
+	p2pAddr := mustNewMultiaddr(t, "/ip4/65.108.66.216/tcp/16341/p2p/QmVuCJ3M96c7vwv4MQBv7WY1HWQacyCEHvM99R8MUDj95d")
+	wssAddr := mustNewMultiaddr(t, "/ip4/127.0.0.1/tcp/443/wss/p2p/QmWqeeHEqG2db37JsuKUxyJ2JF8LtVJMGohKVT8h3aeCVH")
+
+	// roundTrip encodes the addresses and decodes them back, failing on error.
+	roundTrip := func(t *testing.T, in []multiaddr.Multiaddr) []multiaddr.Multiaddr {
+		t.Helper()
+		out, err := bzz.DeserializeUnderlays(bzz.SerializeUnderlays(in))
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		return out
+	}
+
+	t.Run("multiple addresses list", func(t *testing.T) {
+		in := []multiaddr.Multiaddr{ip4TCPAddr, dnsSwarmAddr, p2pAddr, wssAddr}
+		out := roundTrip(t, in)
+		if !reflect.DeepEqual(in, out) {
+			t.Errorf("round trip failed. expected %v, got %v", in, out)
+		}
+	})
+
+	t.Run("single address list", func(t *testing.T) {
+		in := []multiaddr.Multiaddr{dnsSwarmAddr}
+		out := roundTrip(t, in)
+		if !reflect.DeepEqual(in, out) {
+			t.Errorf("round trip failed. expected %v, got %v", in, out)
+		}
+	})
+
+	t.Run("empty list", func(t *testing.T) {
+		out := roundTrip(t, []multiaddr.Multiaddr{})
+		if len(out) != 0 {
+			t.Errorf("expected empty slice from round trip, got %v", out)
+		}
+	})
+}
+
+func TestLegacyCompatibility(t *testing.T) {
+	ip4TCPAddr := mustNewMultiaddr(t, "/ip4/1.2.3.4/tcp/5678/p2p/QmWqeeHEqG2db37JsuKUxyJ2JF8LtVJMGohKVT8h3aeCVH")
+	p2pAddr := mustNewMultiaddr(t, "/ip4/65.108.66.216/tcp/16341/p2p/QmVuCJ3M96c7vwv4MQBv7WY1HWQacyCEHvM99R8MUDj95d")
+	dnsSwarmAddr := mustNewMultiaddr(t, "/dnsaddr/mainnet.ethswarm.org")
+
+	t.Run("legacy parser fails on new list format", func(t *testing.T) {
+		// The prefixed list encoding is expected to be rejected by the plain
+		// multiaddr parser, so old nodes cannot misread it.
+		packed := bzz.SerializeUnderlays([]multiaddr.Multiaddr{ip4TCPAddr, p2pAddr, dnsSwarmAddr})
+		if _, err := multiaddr.NewMultiaddrBytes(packed); err == nil {
+			t.Error("expected legacy NewMultiaddrBytes to fail on list format, but it succeeded")
+		}
+	})
+
+	t.Run("legacy parser succeeds on new single-addr format", func(t *testing.T) {
+		// A single address serializes without the prefix and must remain
+		// parseable by the plain multiaddr parser.
+		packed := bzz.SerializeUnderlays([]multiaddr.Multiaddr{dnsSwarmAddr})
+		if _, err := multiaddr.NewMultiaddrBytes(packed); err != nil {
+			t.Errorf("expected legacy NewMultiaddrBytes to succeed on single-addr format, but it failed: %v", err)
+		}
+	})
+
+	t.Run("new parser succeeds on legacy format", func(t *testing.T) {
+		got, err := bzz.DeserializeUnderlays(p2pAddr.Bytes())
+		if err != nil {
+			t.Fatalf("Deserialize failed on legacy bytes: %v", err)
+		}
+		want := []multiaddr.Multiaddr{p2pAddr}
+		if !reflect.DeepEqual(want, got) {
+			t.Errorf("expected %v, got %v", want, got)
+		}
+	})
+}
+
+// mustNewMultiaddr parses s into a multiaddr, failing the test on any error.
+func mustNewMultiaddr(tb testing.TB, s string) multiaddr.Multiaddr {
+	tb.Helper()
+
+	addr, err := multiaddr.NewMultiaddr(s)
+	if err != nil {
+		tb.Fatal(err)
+	}
+	return addr
+}
diff --git a/pkg/bzz/utilities_test.go b/pkg/bzz/utilities_test.go
index 7e4666b66ac..6a9fd9a97dd 100644
--- a/pkg/bzz/utilities_test.go
+++ b/pkg/bzz/utilities_test.go
@@ -18,6 +18,8 @@ func Test_ContainsAddress(t *testing.T) {
t.Parallel()
addrs := makeAddreses(t, 10)
+ multiAddr := makeMultiUnderlayAddress(t)
+
tt := []struct {
addresses []bzz.Address
search bzz.Address
@@ -32,6 +34,8 @@ func Test_ContainsAddress(t *testing.T) {
{addresses: addrs, search: addrs[1], contains: true},
{addresses: addrs, search: addrs[3], contains: true},
{addresses: addrs, search: addrs[9], contains: true},
+ {addresses: addrs, search: multiAddr, contains: false},
+ {addresses: append(addrs, multiAddr), search: multiAddr, contains: true},
}
for _, tc := range tt {
@@ -45,7 +49,7 @@ func makeAddreses(t *testing.T, count int) []bzz.Address {
t.Helper()
result := make([]bzz.Address, count)
- for i := 0; i < count; i++ {
+ for i := range count {
result[i] = makeAddress(t)
}
return result
@@ -60,7 +64,34 @@ func makeAddress(t *testing.T) bzz.Address {
}
return bzz.Address{
- Underlay: multiaddr,
+ Underlays: []ma.Multiaddr{multiaddr},
+ Overlay: swarm.RandAddress(t),
+ Signature: testutil.RandBytes(t, 12),
+ Nonce: testutil.RandBytes(t, 12),
+ EthereumAddress: testutil.RandBytes(t, 32),
+ }
+}
+
+func makeMultiUnderlayAddress(t *testing.T) bzz.Address {
+ t.Helper()
+
+ addrsStrings := []string{
+ "/ip4/127.0.0.1/tcp/1634/p2p/16Uiu2HAkx8ULY8cTXhdVAcMmLcH9AsTKz6uBQ7DPLKRjMLgBVYkA",
+ "/ip4/10.34.35.60/tcp/35315/p2p/12D3KooWNZujn3N8EcGEFRJrQHw9Hb9v39bNyHC2uw2YNv9jBtjk",
+ "/ip6/::1/tcp/46881/p2p/12D3KooWNZujn3N8EcGEFRJrQHw9Hb9v39bNyHC2uw2YNv9jBtjk",
+ }
+
+ underlays := make([]ma.Multiaddr, len(addrsStrings))
+ for i, addr := range addrsStrings {
+ multiaddr, err := ma.NewMultiaddr(addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ underlays[i] = multiaddr
+ }
+
+ return bzz.Address{
+ Underlays: underlays,
Overlay: swarm.RandAddress(t),
Signature: testutil.RandBytes(t, 12),
Nonce: testutil.RandBytes(t, 12),
diff --git a/pkg/crypto/crypto.go b/pkg/crypto/crypto.go
index c5ac2c7128c..6c43a1e4528 100644
--- a/pkg/crypto/crypto.go
+++ b/pkg/crypto/crypto.go
@@ -92,12 +92,12 @@ func GenerateSecp256r1Key() (*ecdsa.PrivateKey, error) {
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
}
-// EncodeSecp256k1PrivateKey encodes raw ECDSA private key.
+// EncodeSecp256r1PrivateKey encodes raw ECDSA private key.
func EncodeSecp256r1PrivateKey(k *ecdsa.PrivateKey) ([]byte, error) {
return x509.MarshalECPrivateKey(k)
}
-// DecodeSecp256k1PrivateKey decodes raw ECDSA private key.
+// DecodeSecp256r1PrivateKey decodes raw ECDSA private key.
func DecodeSecp256r1PrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
return x509.ParseECPrivateKey(data)
}
diff --git a/pkg/crypto/dh.go b/pkg/crypto/dh.go
index 2d5b4e695fc..f3ea41facc4 100644
--- a/pkg/crypto/dh.go
+++ b/pkg/crypto/dh.go
@@ -29,7 +29,7 @@ func NewDH(key *ecdsa.PrivateKey) DH {
// safety warning: this method is not meant to be exposed as it does not validate private and public keys
// are on the same curve
func (dh *defaultDH) SharedKey(pub *ecdsa.PublicKey, salt []byte) ([]byte, error) {
- x, _ := pub.Curve.ScalarMult(pub.X, pub.Y, dh.key.D.Bytes())
+ x, _ := pub.ScalarMult(pub.X, pub.Y, dh.key.D.Bytes())
if x == nil {
return nil, errors.New("shared secret is point at infinity")
}
diff --git a/pkg/crypto/signer.go b/pkg/crypto/signer.go
index 02863ed2d6e..9c3ee670ded 100644
--- a/pkg/crypto/signer.go
+++ b/pkg/crypto/signer.go
@@ -89,7 +89,7 @@ func (d *defaultSigner) Sign(data []byte) (signature []byte, err error) {
return nil, err
}
- return d.sign(hash, true)
+ return d.sign(hash, false)
}
// SignTx signs an ethereum transaction.
@@ -140,7 +140,7 @@ func (d *defaultSigner) SignTypedData(typedData *eip712.TypedData) ([]byte, erro
// sign the provided hash and convert it to the ethereum (r,s,v) format.
func (d *defaultSigner) sign(sighash []byte, isCompressedKey bool) ([]byte, error) {
pvk, _ := btcec.PrivKeyFromBytes(d.key.D.Bytes())
- signature, err := btcecdsa.SignCompact(pvk, sighash, false)
+ signature, err := btcecdsa.SignCompact(pvk, sighash, isCompressedKey)
if err != nil {
return nil, err
}
diff --git a/pkg/crypto/signer_test.go b/pkg/crypto/signer_test.go
index b3b09ae04af..6d6690ec27e 100644
--- a/pkg/crypto/signer_test.go
+++ b/pkg/crypto/signer_test.go
@@ -41,7 +41,7 @@ func TestDefaultSigner(t *testing.T) {
t.Fatal(err)
}
- if pubKey.X.Cmp(privKey.PublicKey.X) != 0 || pubKey.Y.Cmp(privKey.PublicKey.Y) != 0 {
+ if pubKey.X.Cmp(privKey.X) != 0 || pubKey.Y.Cmp(privKey.Y) != 0 {
t.Fatalf("wanted %v but got %v", pubKey, &privKey.PublicKey)
}
})
@@ -54,7 +54,7 @@ func TestDefaultSigner(t *testing.T) {
t.Fatal(err)
}
- if pubKey.X.Cmp(privKey.PublicKey.X) == 0 && pubKey.Y.Cmp(privKey.PublicKey.Y) == 0 {
+ if pubKey.X.Cmp(privKey.X) == 0 && pubKey.Y.Cmp(privKey.Y) == 0 {
t.Fatal("expected different public key")
}
})
@@ -228,11 +228,11 @@ func TestRecoverEIP712(t *testing.T) {
t.Fatal(err)
}
- if privKey.PublicKey.X.Cmp(pubKey.X) != 0 {
+ if privKey.X.Cmp(pubKey.X) != 0 {
t.Fatalf("recovered wrong public key. wanted %x, got %x", privKey.PublicKey, pubKey)
}
- if privKey.PublicKey.Y.Cmp(pubKey.Y) != 0 {
+ if privKey.Y.Cmp(pubKey.Y) != 0 {
t.Fatalf("recovered wrong public key. wanted %x, got %x", privKey.PublicKey, pubKey)
}
}
diff --git a/pkg/encryption/encryption.go b/pkg/encryption/encryption.go
index 97a582814c8..d46d567088d 100644
--- a/pkg/encryption/encryption.go
+++ b/pkg/encryption/encryption.go
@@ -158,7 +158,7 @@ func (e *Encryption) Transcrypt(i int, in, out []byte) error {
// XOR bytes uptil length of in (out must be at least as long)
inLength := len(in)
- for j := 0; j < inLength; j++ {
+ for j := range inLength {
out[j] = in[j] ^ segmentKey[j]
}
// insert padding if out is longer
diff --git a/pkg/encryption/mock/mock.go b/pkg/encryption/mock/mock.go
index 66420fb2f1b..b917b095be7 100644
--- a/pkg/encryption/mock/mock.go
+++ b/pkg/encryption/mock/mock.go
@@ -124,7 +124,7 @@ func xor(input, key []byte) ([]byte, error) {
}
inputLen := len(input)
output := make([]byte, inputLen)
- for i := 0; i < inputLen; i++ {
+ for i := range inputLen {
output[i] = input[i] ^ key[i%keyLen]
}
return output, nil
diff --git a/pkg/feeds/epochs/lookup_benchmark_test.go b/pkg/feeds/epochs/lookup_benchmark_test.go
index 8f964bc553b..0d409bef686 100644
--- a/pkg/feeds/epochs/lookup_benchmark_test.go
+++ b/pkg/feeds/epochs/lookup_benchmark_test.go
@@ -38,7 +38,7 @@ func BenchmarkFinder(b *testing.B) {
ctx := context.Background()
- for at := int64(0); at < prefill; at++ {
+ for at := range prefill {
err = updater.Update(ctx, at, payload)
if err != nil {
b.Fatal(err)
@@ -58,7 +58,7 @@ func BenchmarkFinder(b *testing.B) {
} {
names := []string{"sync", "async"}
b.Run(fmt.Sprintf("%s:prefill=%d, latest=%d, now=%d", names[k], prefill, latest, now), func(b *testing.B) {
- for n := 0; n < b.N; n++ {
+ for b.Loop() {
_, _, _, err := finder.At(ctx, int64(now), after)
if err != nil {
b.Fatal(err)
diff --git a/pkg/feeds/getter.go b/pkg/feeds/getter.go
index 5f902703ef8..40ccf98bca7 100644
--- a/pkg/feeds/getter.go
+++ b/pkg/feeds/getter.go
@@ -15,7 +15,15 @@ import (
"github.com/ethersphere/bee/v2/pkg/swarm"
)
-var errNotLegacyPayload = errors.New("feed update is not in the legacy payload structure")
+var ErrNotLegacyPayload = errors.New("feed update is not in the legacy payload structure")
+
+// WrappedChunkNotFoundError is returned when a feed update references a
+// wrapped chunk that could not be retrieved from the store.
+type WrappedChunkNotFoundError struct {
+	// Ref is the byte representation of the missing chunk's reference.
+	Ref []byte
+}
+
+// Error implements the error interface, reporting the missing reference in hex.
+func (e WrappedChunkNotFoundError) Error() string {
+	return fmt.Sprintf("feed pointing to the wrapped chunk not found: %x", e.Ref)
+}
// Lookup is the interface for time based feed lookup
type Lookup interface {
@@ -50,7 +58,7 @@ func (f *Getter) Get(ctx context.Context, i Index) (swarm.Chunk, error) {
return f.getter.Get(ctx, addr)
}
-func GetWrappedChunk(ctx context.Context, getter storage.Getter, ch swarm.Chunk) (swarm.Chunk, error) {
+func GetWrappedChunk(ctx context.Context, getter storage.Getter, ch swarm.Chunk, legacyResolve bool) (swarm.Chunk, error) {
wc, err := FromChunk(ch)
if err != nil {
return nil, err
@@ -59,16 +67,15 @@ func GetWrappedChunk(ctx context.Context, getter storage.Getter, ch swarm.Chunk)
// possible values right now:
// unencrypted ref: span+timestamp+ref => 8+8+32=48
// encrypted ref: span+timestamp+ref+decryptKey => 8+8+64=80
- ref, err := legacyPayload(wc)
- if err != nil {
- if errors.Is(err, errNotLegacyPayload) {
- return wc, nil
+ if legacyResolve {
+ ref, err := legacyPayload(wc)
+ if err != nil {
+ return nil, err
+ }
+ wc, err = getter.Get(ctx, ref)
+ if err != nil {
+ return nil, WrappedChunkNotFoundError{Ref: ref.Bytes()}
}
- return nil, err
- }
- wc, err = getter.Get(ctx, ref)
- if err != nil {
- return nil, err
}
return wc, nil
@@ -86,9 +93,20 @@ func FromChunk(ch swarm.Chunk) (swarm.Chunk, error) {
// legacyPayload returns back the referenced chunk and datetime from the legacy feed payload
func legacyPayload(wrappedChunk swarm.Chunk) (swarm.Address, error) {
cacData := wrappedChunk.Data()
- if !(len(cacData) == 16+swarm.HashSize || len(cacData) == 16+swarm.HashSize*2) {
- return swarm.ZeroAddress, errNotLegacyPayload
+ if !isV1Length(len(cacData)) {
+ return swarm.ZeroAddress, ErrNotLegacyPayload
}
-
return swarm.NewAddress(cacData[16:]), nil
}
+
+// IsV1Payload reports whether the chunk's wrapped content has the legacy (v1)
+// feed payload length (span+timestamp+ref, optionally with a decryption key).
+// It returns an error if the wrapped chunk cannot be extracted from ch.
+func IsV1Payload(ch swarm.Chunk) (bool, error) {
+	cc, err := FromChunk(ch)
+	if err != nil {
+		return false, err
+	}
+	return isV1Length(len(cc.Data())), nil
+}
+
+// isV1Length reports whether length matches a legacy feed payload:
+// 8-byte span + 8-byte timestamp followed by an unencrypted reference
+// (HashSize) or an encrypted reference with decryption key (HashSize*2).
+func isV1Length(length int) bool {
+	return length == 16+swarm.HashSize || length == 16+swarm.HashSize*2
+}
diff --git a/pkg/feeds/getter_test.go b/pkg/feeds/getter_test.go
index d09d5b837aa..df4f9b06c11 100644
--- a/pkg/feeds/getter_test.go
+++ b/pkg/feeds/getter_test.go
@@ -17,18 +17,24 @@ import (
func TestGetWrappedChunk(t *testing.T) {
storer := mockstorer.New()
+ data := []byte("data")
// new format (wraps chunk)
- ch := soctesting.GenerateMockSOC(t, []byte("data")).Chunk()
- wch, err := GetWrappedChunk(context.Background(), storer.ChunkStore(), ch)
+ ch := soctesting.GenerateMockSOC(t, data).Chunk()
+ wch, err := GetWrappedChunk(context.Background(), storer.ChunkStore(), ch, false)
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(wch.Data()[8:], []byte("data")) {
+ if !bytes.Equal(wch.Data()[8:], data) {
t.Fatal("data mismatch")
}
// old format
+ err = storer.Put(context.Background(), wch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
tt := []struct {
name string
addr []byte
@@ -49,19 +55,30 @@ func TestGetWrappedChunk(t *testing.T) {
binary.BigEndian.PutUint64(timestamp, 1)
ch = soctesting.GenerateMockSOC(t, append(timestamp, tc.addr...)).Chunk()
- err = storer.Put(context.Background(), wch)
+ wch, err = GetWrappedChunk(context.Background(), storer.ChunkStore(), ch, true)
if err != nil {
t.Fatal(err)
}
- wch, err = GetWrappedChunk(context.Background(), storer.ChunkStore(), ch)
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(wch.Data()[8:], []byte("data")) {
+ if !bytes.Equal(wch.Data()[8:], data) {
t.Fatal("data mismatch")
}
})
}
+
+ t.Run("returns feed legacy payload", func(t *testing.T) {
+ timestamp := make([]byte, 8)
+ binary.BigEndian.PutUint64(timestamp, 1)
+ feedChData := append(timestamp, wch.Address().Bytes()...)
+ ch = soctesting.GenerateMockSOC(t, feedChData).Chunk()
+
+ wch, err = GetWrappedChunk(context.Background(), storer.ChunkStore(), ch, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(wch.Data()[8:], feedChData) {
+ t.Fatal("data should be similar as old legacy feed payload format.")
+ }
+ })
}
diff --git a/pkg/feeds/sequence/lookup_benchmark_test.go b/pkg/feeds/sequence/lookup_benchmark_test.go
index e81d8cc9a61..73e9255d497 100644
--- a/pkg/feeds/sequence/lookup_benchmark_test.go
+++ b/pkg/feeds/sequence/lookup_benchmark_test.go
@@ -36,7 +36,7 @@ func BenchmarkFinder(b *testing.B) {
ctx := context.Background()
- for at := int64(0); at < prefill; at++ {
+ for at := range prefill {
err = updater.Update(ctx, at, payload)
if err != nil {
b.Fatal(err)
@@ -54,7 +54,7 @@ func BenchmarkFinder(b *testing.B) {
} {
names := []string{"sync", "async"}
b.Run(fmt.Sprintf("%s:prefill=%d, latest/now=%d", names[k], prefill, now), func(b *testing.B) {
- for n := 0; n < b.N; n++ {
+ for b.Loop() {
_, _, _, err := finder.At(ctx, now, 0)
if err != nil {
b.Fatal(err)
diff --git a/pkg/feeds/testing/lookup.go b/pkg/feeds/testing/lookup.go
index 21656be5b64..e56e3c477ba 100644
--- a/pkg/feeds/testing/lookup.go
+++ b/pkg/feeds/testing/lookup.go
@@ -186,7 +186,7 @@ func TestFinderIntervals(t *testing.T, nextf func() (bool, int64), finderf func(
func TestFinderRandomIntervals(t *testing.T, finderf func(storage.Getter, *feeds.Feed) feeds.Lookup, updaterf func(putter storage.Putter, signer crypto.Signer, topic []byte) (feeds.Updater, error)) {
t.Parallel()
- for j := 0; j < 3; j++ {
+ for j := range 3 {
t.Run(fmt.Sprintf("random intervals %d", j), func(t *testing.T) {
t.Parallel()
diff --git a/pkg/file/buffer.go b/pkg/file/buffer.go
index 976eca20fa0..aaf36f535f3 100644
--- a/pkg/file/buffer.go
+++ b/pkg/file/buffer.go
@@ -42,10 +42,7 @@ func (c *ChunkPipe) Read(b []byte) (int, error) {
func (c *ChunkPipe) Write(b []byte) (int, error) {
nw := 0
- for {
- if nw >= len(b) {
- break
- }
+ for nw < len(b) {
copied := copy(c.data[c.cursor:], b[nw:])
c.cursor += copied
diff --git a/pkg/file/buffer_test.go b/pkg/file/buffer_test.go
index 9fdf66266b6..71b374b7f4b 100644
--- a/pkg/file/buffer_test.go
+++ b/pkg/file/buffer_test.go
@@ -119,8 +119,8 @@ func TestCopyBuffer(t *testing.T) {
dataSize int
}{}
- for i := 0; i < len(readBufferSizes); i++ {
- for j := 0; j < len(dataSizes); j++ {
+ for i := range readBufferSizes {
+ for j := range dataSizes {
testCases = append(testCases, struct {
readBufferSize int
dataSize int
diff --git a/pkg/file/joiner/joiner.go b/pkg/file/joiner/joiner.go
index 4d9152357c0..cc89f921397 100644
--- a/pkg/file/joiner/joiner.go
+++ b/pkg/file/joiner/joiner.go
@@ -65,9 +65,24 @@ func fingerprint(addrs []swarm.Address) string {
return string(h.Sum(nil))
}
+// createRemoveCallback returns a function that handles the cleanup after a recovery attempt
+func (g *decoderCache) createRemoveCallback(key string) func(error) {
+ return func(err error) {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ if err != nil {
+ // signals that a new getter is needed to reattempt to recover the data
+ delete(g.cache, key)
+ } else {
+ // signals that the chunks were fetched/recovered/cached so a future getter is not needed
+ // The nil value indicates a successful recovery
+ g.cache[key] = nil
+ }
+ }
+}
+
// GetOrCreate returns a decoder for the given chunk address
func (g *decoderCache) GetOrCreate(addrs []swarm.Address, shardCnt int) storage.Getter {
-
// since a recovery decoder is not allowed, simply return the underlying netstore
if g.config.Strict && g.config.Strategy == getter.NONE {
return g.fetcher
@@ -83,22 +98,31 @@ func (g *decoderCache) GetOrCreate(addrs []swarm.Address, shardCnt int) storage.
d, ok := g.cache[key]
if ok {
if d == nil {
- return g.fetcher
+ // The nil value indicates a previous successful recovery
+ // Create a new decoder but only use it as fallback if network fetch fails
+ decoderCallback := g.createRemoveCallback(key)
+
+ // Create a factory function that will instantiate the decoder only when needed
+ recovery := func() storage.Getter {
+ g.config.Logger.Debug("lazy-creating recovery decoder after fetch failed", "key", key)
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ d, ok := g.cache[key]
+ if ok && d != nil {
+ return d
+ }
+ d = getter.New(addrs, shardCnt, g.fetcher, g.putter, decoderCallback, g.config)
+ g.cache[key] = d
+ return d
+ }
+
+ return getter.NewReDecoder(g.fetcher, recovery, g.config.Logger)
}
return d
}
- remove := func(err error) {
- g.mu.Lock()
- defer g.mu.Unlock()
- if err != nil {
- // signals that a new getter is needed to reattempt to recover the data
- delete(g.cache, key)
- } else {
- // signals that the chunks were fetched/recovered/cached so a future getter is not needed
- g.cache[key] = nil
- }
- }
- d = getter.New(addrs, shardCnt, g.fetcher, g.putter, remove, g.config)
+
+ removeCallback := g.createRemoveCallback(key)
+ d = getter.New(addrs, shardCnt, g.fetcher, g.putter, removeCallback, g.config)
g.cache[key] = d
return d
}
@@ -185,10 +209,7 @@ func (j *joiner) ReadAt(buffer []byte, off int64) (read int, err error) {
return 0, io.EOF
}
- readLen := int64(cap(buffer))
- if readLen > j.span-off {
- readLen = j.span - off
- }
+ readLen := min(int64(cap(buffer)), j.span-off)
var bytesRead int64
var eg errgroup.Group
j.readAtOffset(buffer, j.rootData, 0, j.span, off, 0, readLen, &bytesRead, j.rootParity, &eg)
@@ -253,14 +274,9 @@ func (j *joiner) readAtOffset(
subtrieSpanLimit := sec
currentReadSize := subtrieSpan - (off - cur) // the size of the subtrie, minus the offset from the start of the trie
-
// upper bound alignments
- if currentReadSize > bytesToRead {
- currentReadSize = bytesToRead
- }
- if currentReadSize > subtrieSpan {
- currentReadSize = subtrieSpan
- }
+ currentReadSize = min(currentReadSize, bytesToRead)
+ currentReadSize = min(currentReadSize, subtrieSpan)
func(address swarm.Address, b []byte, cur, subTrieSize, off, bufferOffset, bytesToRead, subtrieSpanLimit int64) {
eg.Go(func() error {
diff --git a/pkg/file/joiner/joiner_test.go b/pkg/file/joiner/joiner_test.go
index dfed2aecc91..0cb81314582 100644
--- a/pkg/file/joiner/joiner_test.go
+++ b/pkg/file/joiner/joiner_test.go
@@ -15,6 +15,7 @@ import (
mrand "math/rand"
"sync"
"testing"
+ "testing/synctest"
"time"
"github.com/ethersphere/bee/v2/pkg/cac"
@@ -56,8 +57,7 @@ func TestJoinerSingleChunk(t *testing.T) {
store := inmemchunkstore.New()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
// create the chunk to
mockAddrHex := fmt.Sprintf("%064s", "2a")
@@ -95,8 +95,7 @@ func TestJoinerDecryptingStore_NormalChunk(t *testing.T) {
st := inmemchunkstore.New()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
// create the chunk to
mockAddrHex := fmt.Sprintf("%064s", "2a")
@@ -134,8 +133,7 @@ func TestJoinerWithReference(t *testing.T) {
st := inmemchunkstore.New()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
// create root chunk and two data chunks referenced in the root chunk
rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
@@ -185,8 +183,7 @@ func TestJoinerMalformed(t *testing.T) {
store := inmemchunkstore.New()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
subTrie := []byte{8085: 1}
pb := builder.NewPipelineBuilder(ctx, store, false, 0)
@@ -253,8 +250,7 @@ func TestEncryptDecrypt(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
pipe := builder.NewPipelineBuilder(ctx, store, true, 0)
testDataReader := bytes.NewReader(testData)
resultAddress, err := builder.FeedPipeline(ctx, pipe, testDataReader)
@@ -335,8 +331,7 @@ func TestSeek(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
store := inmemchunkstore.New()
testutil.CleanupCloser(t, store)
@@ -612,8 +607,7 @@ func TestPrefetch(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
store := inmemchunkstore.New()
testutil.CleanupCloser(t, store)
@@ -646,265 +640,266 @@ func TestPrefetch(t *testing.T) {
}
func TestJoinerReadAt(t *testing.T) {
- t.Parallel()
+ synctest.Test(t, func(t *testing.T) {
+ store := inmemchunkstore.New()
- store := inmemchunkstore.New()
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
+ ctx, cancel := context.WithTimeout(t.Context(), time.Second)
+ defer cancel()
- // create root chunk with 2 references and the referenced data chunks
- rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
- err := store.Put(ctx, rootChunk)
- if err != nil {
- t.Fatal(err)
- }
+ // create root chunk with 2 references and the referenced data chunks
+ rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
+ err := store.Put(ctx, rootChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
- firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
- err = store.Put(ctx, firstChunk)
- if err != nil {
- t.Fatal(err)
- }
+ firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
+ firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
+ err = store.Put(ctx, firstChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
- secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
- err = store.Put(ctx, secondChunk)
- if err != nil {
- t.Fatal(err)
- }
+ secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
+ secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
+ err = store.Put(ctx, secondChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- j, _, err := joiner.New(ctx, store, store, rootChunk.Address(), redundancy.DefaultLevel)
- if err != nil {
- t.Fatal(err)
- }
+ j, _, err := joiner.New(ctx, store, store, rootChunk.Address(), redundancy.DefaultLevel)
+ if err != nil {
+ t.Fatal(err)
+ }
- b := make([]byte, swarm.ChunkSize)
- _, err = j.ReadAt(b, swarm.ChunkSize)
- if err != nil {
- t.Fatal(err)
- }
+ b := make([]byte, swarm.ChunkSize)
+ _, err = j.ReadAt(b, swarm.ChunkSize)
+ if err != nil {
+ t.Fatal(err)
+ }
- if !bytes.Equal(b, secondChunk.Data()[8:]) {
- t.Fatal("data read at offset not equal to expected chunk")
- }
+ if !bytes.Equal(b, secondChunk.Data()[8:]) {
+ t.Fatal("data read at offset not equal to expected chunk")
+ }
+ })
}
// TestJoinerOneLevel tests the retrieval of two data chunks immediately
-// below the root chunk level.
+// below the root chunk level. It verifies that the joiner correctly reads
+// and returns data from both chunks in sequence and handles EOF properly.
func TestJoinerOneLevel(t *testing.T) {
- t.Parallel()
-
- store := inmemchunkstore.New()
+ synctest.Test(t, func(t *testing.T) {
+ store := inmemchunkstore.New()
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
+ ctx, cancel := context.WithTimeout(t.Context(), time.Second)
+ defer cancel()
- // create root chunk with 2 references and the referenced data chunks
- rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
- err := store.Put(ctx, rootChunk)
- if err != nil {
- t.Fatal(err)
- }
+ // create root chunk with 2 references and the referenced data chunks
+ rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
+ err := store.Put(ctx, rootChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
- firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
- err = store.Put(ctx, firstChunk)
- if err != nil {
- t.Fatal(err)
- }
+ firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
+ firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
+ err = store.Put(ctx, firstChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
- secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
- err = store.Put(ctx, secondChunk)
- if err != nil {
- t.Fatal(err)
- }
+ secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
+ secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
+ err = store.Put(ctx, secondChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- j, _, err := joiner.New(ctx, store, store, rootChunk.Address(), redundancy.DefaultLevel)
- if err != nil {
- t.Fatal(err)
- }
+ j, _, err := joiner.New(ctx, store, store, rootChunk.Address(), redundancy.DefaultLevel)
+ if err != nil {
+ t.Fatal(err)
+ }
- // verify first chunk content
- outBuffer := make([]byte, swarm.ChunkSize)
- c, err := j.Read(outBuffer)
- if err != nil {
- t.Fatal(err)
- }
- if c != swarm.ChunkSize {
- t.Fatalf("expected firstchunk read count %d, got %d", swarm.ChunkSize, c)
- }
- if !bytes.Equal(outBuffer, firstChunk.Data()[8:]) {
- t.Fatalf("firstchunk data mismatch, expected %x, got %x", outBuffer, firstChunk.Data()[8:])
- }
+ // verify first chunk content
+ outBuffer := make([]byte, swarm.ChunkSize)
+ c, err := j.Read(outBuffer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c != swarm.ChunkSize {
+ t.Fatalf("expected firstchunk read count %d, got %d", swarm.ChunkSize, c)
+ }
+ if !bytes.Equal(outBuffer, firstChunk.Data()[8:]) {
+ t.Fatalf("firstchunk data mismatch, expected %x, got %x", outBuffer, firstChunk.Data()[8:])
+ }
- // verify second chunk content
- c, err = j.Read(outBuffer)
- if err != nil {
- t.Fatal(err)
- }
- if c != swarm.ChunkSize {
- t.Fatalf("expected secondchunk read count %d, got %d", swarm.ChunkSize, c)
- }
- if !bytes.Equal(outBuffer, secondChunk.Data()[8:]) {
- t.Fatalf("secondchunk data mismatch, expected %x, got %x", outBuffer, secondChunk.Data()[8:])
- }
+ // verify second chunk content
+ c, err = j.Read(outBuffer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c != swarm.ChunkSize {
+ t.Fatalf("expected secondchunk read count %d, got %d", swarm.ChunkSize, c)
+ }
+ if !bytes.Equal(outBuffer, secondChunk.Data()[8:]) {
+ t.Fatalf("secondchunk data mismatch, expected %x, got %x", outBuffer, secondChunk.Data()[8:])
+ }
- // verify EOF is returned also after first time it is returned
- _, err = j.Read(outBuffer)
- if !errors.Is(err, io.EOF) {
- t.Fatal("expected io.EOF")
- }
+ // verify EOF is returned also after first time it is returned
+ _, err = j.Read(outBuffer)
+ if !errors.Is(err, io.EOF) {
+ t.Fatal("expected io.EOF")
+ }
- _, err = j.Read(outBuffer)
- if !errors.Is(err, io.EOF) {
- t.Fatal("expected io.EOF")
- }
+ _, err = j.Read(outBuffer)
+ if !errors.Is(err, io.EOF) {
+ t.Fatal("expected io.EOF")
+ }
+ })
}
// TestJoinerTwoLevelsAcrossChunk tests the retrieval of data chunks below
// first intermediate level across two intermediate chunks.
// Last chunk has sub-chunk length.
func TestJoinerTwoLevelsAcrossChunk(t *testing.T) {
- t.Parallel()
+ synctest.Test(t, func(t *testing.T) {
+ store := inmemchunkstore.New()
- store := inmemchunkstore.New()
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
+ ctx, cancel := context.WithTimeout(t.Context(), time.Second)
+ defer cancel()
- // create root chunk with 2 references and two intermediate chunks with references
- rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*swarm.Branches+42, swarm.SectionSize*2)
- err := store.Put(ctx, rootChunk)
- if err != nil {
- t.Fatal(err)
- }
+ // create root chunk with 2 references and two intermediate chunks with references
+ rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*swarm.Branches+42, swarm.SectionSize*2)
+ err := store.Put(ctx, rootChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
- firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize*swarm.Branches, swarm.ChunkSize)
- err = store.Put(ctx, firstChunk)
- if err != nil {
- t.Fatal(err)
- }
+ firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
+ firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize*swarm.Branches, swarm.ChunkSize)
+ err = store.Put(ctx, firstChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
- secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, 42, swarm.SectionSize)
- err = store.Put(ctx, secondChunk)
- if err != nil {
- t.Fatal(err)
- }
+ secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
+ secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, 42, swarm.SectionSize)
+ err = store.Put(ctx, secondChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- // create 128+1 chunks for all references in the intermediate chunks
- cursor := 8
- for i := 0; i < swarm.Branches; i++ {
- chunkAddressBytes := firstChunk.Data()[cursor : cursor+swarm.SectionSize]
+ // create 128+1 chunks for all references in the intermediate chunks
+ cursor := 8
+ for range swarm.Branches {
+ chunkAddressBytes := firstChunk.Data()[cursor : cursor+swarm.SectionSize]
+ chunkAddress := swarm.NewAddress(chunkAddressBytes)
+ ch := filetest.GenerateTestRandomFileChunk(chunkAddress, swarm.ChunkSize, swarm.ChunkSize)
+ err := store.Put(ctx, ch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cursor += swarm.SectionSize
+ }
+ chunkAddressBytes := secondChunk.Data()[8:]
chunkAddress := swarm.NewAddress(chunkAddressBytes)
- ch := filetest.GenerateTestRandomFileChunk(chunkAddress, swarm.ChunkSize, swarm.ChunkSize)
- err := store.Put(ctx, ch)
+ ch := filetest.GenerateTestRandomFileChunk(chunkAddress, 42, 42)
+ err = store.Put(ctx, ch)
if err != nil {
t.Fatal(err)
}
- cursor += swarm.SectionSize
- }
- chunkAddressBytes := secondChunk.Data()[8:]
- chunkAddress := swarm.NewAddress(chunkAddressBytes)
- ch := filetest.GenerateTestRandomFileChunk(chunkAddress, 42, 42)
- err = store.Put(ctx, ch)
- if err != nil {
- t.Fatal(err)
- }
- j, _, err := joiner.New(ctx, store, store, rootChunk.Address(), redundancy.DefaultLevel)
- if err != nil {
- t.Fatal(err)
- }
+ j, _, err := joiner.New(ctx, store, store, rootChunk.Address(), redundancy.DefaultLevel)
+ if err != nil {
+ t.Fatal(err)
+ }
- // read back all the chunks and verify
- b := make([]byte, swarm.ChunkSize)
- for i := 0; i < swarm.Branches; i++ {
+ // read back all the chunks and verify
+ b := make([]byte, swarm.ChunkSize)
+ for i := range swarm.Branches {
+ c, err := j.Read(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c != swarm.ChunkSize {
+ t.Fatalf("chunk %d expected read %d bytes; got %d", i, swarm.ChunkSize, c)
+ }
+ }
c, err := j.Read(b)
if err != nil {
t.Fatal(err)
}
- if c != swarm.ChunkSize {
- t.Fatalf("chunk %d expected read %d bytes; got %d", i, swarm.ChunkSize, c)
+ if c != 42 {
+ t.Fatalf("last chunk expected read %d bytes; got %d", 42, c)
}
- }
- c, err := j.Read(b)
- if err != nil {
- t.Fatal(err)
- }
- if c != 42 {
- t.Fatalf("last chunk expected read %d bytes; got %d", 42, c)
- }
+ })
}
func TestJoinerIterateChunkAddresses(t *testing.T) {
- t.Parallel()
+ synctest.Test(t, func(t *testing.T) {
+ store := inmemchunkstore.New()
- store := inmemchunkstore.New()
+ ctx, cancel := context.WithTimeout(t.Context(), time.Second)
+ defer cancel()
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
+ // create root chunk with 2 references and the referenced data chunks
+ rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
+ err := store.Put(ctx, rootChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- // create root chunk with 2 references and the referenced data chunks
- rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
- err := store.Put(ctx, rootChunk)
- if err != nil {
- t.Fatal(err)
- }
+ firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
+ firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
+ err = store.Put(ctx, firstChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
- firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
- err = store.Put(ctx, firstChunk)
- if err != nil {
- t.Fatal(err)
- }
+ secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
+ secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
+ err = store.Put(ctx, secondChunk)
+ if err != nil {
+ t.Fatal(err)
+ }
- secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
- secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
- err = store.Put(ctx, secondChunk)
- if err != nil {
- t.Fatal(err)
- }
+ createdAddresses := []swarm.Address{rootChunk.Address(), firstAddress, secondAddress}
- createdAddresses := []swarm.Address{rootChunk.Address(), firstAddress, secondAddress}
+ j, _, err := joiner.New(ctx, store, store, rootChunk.Address(), redundancy.DefaultLevel)
+ if err != nil {
+ t.Fatal(err)
+ }
- j, _, err := joiner.New(ctx, store, store, rootChunk.Address(), redundancy.DefaultLevel)
- if err != nil {
- t.Fatal(err)
- }
+ foundAddresses := make(map[string]struct{})
+ var foundAddressesMu sync.Mutex
- foundAddresses := make(map[string]struct{})
- var foundAddressesMu sync.Mutex
+ err = j.IterateChunkAddresses(func(addr swarm.Address) error {
+ foundAddressesMu.Lock()
+ defer foundAddressesMu.Unlock()
- err = j.IterateChunkAddresses(func(addr swarm.Address) error {
- foundAddressesMu.Lock()
- defer foundAddressesMu.Unlock()
-
- foundAddresses[addr.String()] = struct{}{}
- return nil
- })
- if err != nil {
- t.Fatal(err)
- }
+ foundAddresses[addr.String()] = struct{}{}
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
- if len(createdAddresses) != len(foundAddresses) {
- t.Fatalf("expected to find %d addresses, got %d", len(createdAddresses), len(foundAddresses))
- }
+ if len(createdAddresses) != len(foundAddresses) {
+ t.Fatalf("expected to find %d addresses, got %d", len(createdAddresses), len(foundAddresses))
+ }
- checkAddressFound := func(t *testing.T, foundAddresses map[string]struct{}, address swarm.Address) {
- t.Helper()
+ checkAddressFound := func(t *testing.T, foundAddresses map[string]struct{}, address swarm.Address) {
+ t.Helper()
- if _, ok := foundAddresses[address.String()]; !ok {
- t.Fatalf("expected address %s not found", address.String())
+ if _, ok := foundAddresses[address.String()]; !ok {
+ t.Fatalf("expected address %s not found", address.String())
+ }
}
- }
- for _, createdAddress := range createdAddresses {
- checkAddressFound(t, foundAddresses, createdAddress)
- }
+ for _, createdAddress := range createdAddresses {
+ checkAddressFound(t, foundAddresses, createdAddress)
+ }
+ })
}
func TestJoinerIterateChunkAddresses_Encrypted(t *testing.T) {
@@ -917,8 +912,7 @@ func TestJoinerIterateChunkAddresses_Encrypted(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
pipe := builder.NewPipelineBuilder(ctx, store, true, 0)
testDataReader := bytes.NewReader(testData)
resultAddress, err := builder.FeedPipeline(ctx, pipe, testDataReader)
@@ -1072,8 +1066,7 @@ func TestJoinerRedundancy(t *testing.T) {
},
} {
t.Run(fmt.Sprintf("redundancy=%d encryption=%t", tc.rLevel, tc.encryptChunk), func(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
shardCnt := tc.rLevel.GetMaxShards()
parityCnt := tc.rLevel.GetParities(shardCnt)
if tc.encryptChunk {
@@ -1118,7 +1111,7 @@ func TestJoinerRedundancy(t *testing.T) {
}
// all data can be read back
readCheck := func(t *testing.T, expErr error) {
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(t.Context())
defer cancel()
decodeTimeoutStr := time.Second.String()
@@ -1235,143 +1228,150 @@ func TestJoinerRedundancy(t *testing.T) {
//
// nolint:thelper
func TestJoinerRedundancyMultilevel(t *testing.T) {
- t.Parallel()
- test := func(t *testing.T, rLevel redundancy.Level, encrypt bool, size int) {
- t.Helper()
- store := mockstorer.NewForgettingStore(newChunkStore())
- seed, err := pseudorand.NewSeed()
- if err != nil {
- t.Fatal(err)
- }
- dataReader := pseudorand.NewReader(seed, size*swarm.ChunkSize)
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- pipe := builder.NewPipelineBuilder(ctx, store, encrypt, rLevel)
- addr, err := builder.FeedPipeline(ctx, pipe, dataReader)
- if err != nil {
- t.Fatal(err)
- }
- expRead := swarm.ChunkSize
- buf := make([]byte, expRead)
- offset := mrand.Intn(size) * expRead
- canReadRange := func(t *testing.T, s getter.Strategy, fallback bool, canRead bool) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ r2level := []int{2, 1, 2, 3, 2}
+ encryptChunk := []bool{false, false, true, true, true}
- decodingTimeoutStr := time.Second.String()
+ for _, rLevel := range []redundancy.Level{0, 1, 2, 3, 4} {
+ for _, encrypt := range []bool{false, true} {
+ for _, levels := range []int{1, 2, 3} {
+ // Skip tests that don't meet the filter criteria to save time
+ if r2level[rLevel] != levels || encrypt != encryptChunk[rLevel] {
+ // continue (not t.Skip): calling t.Skip here would abort the entire parent test
+ continue
+ }
- ctx, err := getter.SetConfigInContext(ctx, &s, &fallback, &decodingTimeoutStr, log.Noop)
- if err != nil {
- t.Fatal(err)
- }
+ // Calculate chunk count
+ shardCnt := rLevel.GetMaxShards()
+ if encrypt {
+ shardCnt = rLevel.GetMaxEncShards()
+ }
- j, _, err := joiner.New(ctx, store, store, addr, redundancy.DefaultLevel)
- if err != nil {
- t.Fatal(err)
- }
- n, err := j.ReadAt(buf, int64(offset))
- if !canRead {
- if !errors.Is(err, storage.ErrNotFound) && !errors.Is(err, context.DeadlineExceeded) {
- t.Fatalf("expected error %v or %v. got %v", storage.ErrNotFound, context.DeadlineExceeded, err)
+ var chunkCnt int
+ switch levels {
+ case 1:
+ chunkCnt = 2
+ case 2:
+ chunkCnt = shardCnt + 1
+ case 3:
+ chunkCnt = shardCnt*shardCnt + 1
}
- return
- }
- if err != nil {
- t.Fatal(err)
- }
- if n != expRead {
- t.Errorf("read %d bytes out of %d", n, expRead)
- }
- _, err = dataReader.Seek(int64(offset), io.SeekStart)
- if err != nil {
- t.Fatal(err)
- }
- ok, err := dataReader.Match(bytes.NewBuffer(buf), expRead)
- if err != nil {
- t.Fatal(err)
- }
- if !ok {
- t.Error("content mismatch")
+
+ testName := fmt.Sprintf("rLevel=%v_encrypt=%v_levels=%d_chunks=%d_incomplete", rLevel, encrypt, levels, chunkCnt)
+ t.Run(testName, func(t *testing.T) {
+ synctest.Test(t, func(t *testing.T) {
+ runRedundancyTest(t, rLevel, encrypt, chunkCnt)
+ })
+ })
+
+ // For the "full" test cases
+ switch levels {
+ case 1:
+ chunkCnt = shardCnt
+ case 2:
+ chunkCnt = shardCnt * shardCnt
+ case 3:
+ continue
+ }
+
+ testName = fmt.Sprintf("rLevel=%v_encrypt=%v_levels=%d_chunks=%d_full", rLevel, encrypt, levels, chunkCnt)
+ t.Run(testName, func(t *testing.T) {
+ synctest.Test(t, func(t *testing.T) {
+ runRedundancyTest(t, rLevel, encrypt, chunkCnt)
+ })
+ })
}
}
+ }
+}
- // first sanity check and recover a range
- t.Run("NONE w/o fallback CAN retrieve", func(t *testing.T) {
- store.Record()
- defer store.Unrecord()
- canReadRange(t, getter.NONE, false, true)
- })
+// Helper function to run the redundancy test without nested t.Run calls
+func runRedundancyTest(t *testing.T, rLevel redundancy.Level, encrypt bool, size int) {
+ t.Helper()
- // do not forget the root chunk
- store.Unmiss(swarm.NewAddress(addr.Bytes()[:swarm.HashSize]))
- // after we forget the chunks on the way to the range, we should not be able to retrieve
- t.Run("NONE w/o fallback CANNOT retrieve", func(t *testing.T) {
- canReadRange(t, getter.NONE, false, false)
- })
+ store := mockstorer.NewForgettingStore(newChunkStore())
+ seed, err := pseudorand.NewSeed()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dataReader := pseudorand.NewReader(seed, size*swarm.ChunkSize)
+ ctx, cancel := context.WithCancel(t.Context())
+ defer cancel()
+ pipe := builder.NewPipelineBuilder(ctx, store, encrypt, rLevel)
+ addr, err := builder.FeedPipeline(ctx, pipe, dataReader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expRead := swarm.ChunkSize
+ buf := make([]byte, expRead)
+ offset := mrand.Intn(size) * expRead
- // we lost a data chunk, we cannot recover using DATA only strategy with no fallback
- t.Run("DATA w/o fallback CANNOT retrieve", func(t *testing.T) {
- canReadRange(t, getter.DATA, false, false)
- })
+ // Helper function to test reading with different strategies
+ canReadRange := func(s getter.Strategy, fallback bool, canRead bool, description string) {
+ t.Logf("Testing: %s", description)
+ ctx, cancel := context.WithCancel(t.Context())
+ defer cancel()
- if rLevel == 0 {
- // allowing fallback mode will not help if no redundancy used for upload
- t.Run("DATA w fallback CANNOT retrieve", func(t *testing.T) {
- canReadRange(t, getter.DATA, true, false)
- })
+ decodingTimeoutStr := time.Second.String()
+
+ ctx, err := getter.SetConfigInContext(ctx, &s, &fallback, &decodingTimeoutStr, log.Noop)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ j, _, err := joiner.New(ctx, store, store, addr, redundancy.DefaultLevel)
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err := j.ReadAt(buf, int64(offset))
+ if !canRead {
+ if !errors.Is(err, storage.ErrNotFound) && !errors.Is(err, context.DeadlineExceeded) {
+ t.Fatalf("expected error %v or %v. got %v", storage.ErrNotFound, context.DeadlineExceeded, err)
+ }
return
}
- // allowing fallback mode will make the range retrievable using erasure decoding
- t.Run("DATA w fallback CAN retrieve", func(t *testing.T) {
- canReadRange(t, getter.DATA, true, true)
- })
- // after the reconstructed data is stored, we can retrieve the range using DATA only mode
- t.Run("after recovery, NONE w/o fallback CAN retrieve", func(t *testing.T) {
- canReadRange(t, getter.NONE, false, true)
- })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != expRead {
+ t.Errorf("read %d bytes out of %d", n, expRead)
+ }
+ if _, err = dataReader.Seek(int64(offset), io.SeekStart); err != nil {
+ t.Fatal(err)
+ }
+ ok, err := dataReader.Match(bytes.NewBuffer(buf), expRead)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ok {
+ t.Error("content mismatch")
+ }
}
- r2level := []int{2, 1, 2, 3, 2}
- encryptChunk := []bool{false, false, true, true, true}
- for _, rLevel := range []redundancy.Level{0, 1, 2, 3, 4} {
- // speeding up tests by skipping some of them
- t.Run(fmt.Sprintf("rLevel=%v", rLevel), func(t *testing.T) {
- t.Parallel()
- for _, encrypt := range []bool{false, true} {
- shardCnt := rLevel.GetMaxShards()
- if encrypt {
- shardCnt = rLevel.GetMaxEncShards()
- }
- for _, levels := range []int{1, 2, 3} {
- chunkCnt := 1
- switch levels {
- case 1:
- chunkCnt = 2
- case 2:
- chunkCnt = shardCnt + 1
- case 3:
- chunkCnt = shardCnt*shardCnt + 1
- }
- t.Run(fmt.Sprintf("encrypt=%v levels=%d chunks=%d incomplete", encrypt, levels, chunkCnt), func(t *testing.T) {
- if r2level[rLevel] != levels || encrypt != encryptChunk[rLevel] {
- t.Skip("skipping to save time")
- }
- test(t, rLevel, encrypt, chunkCnt)
- })
- switch levels {
- case 1:
- chunkCnt = shardCnt
- case 2:
- chunkCnt = shardCnt * shardCnt
- case 3:
- continue
- }
- t.Run(fmt.Sprintf("encrypt=%v levels=%d chunks=%d full", encrypt, levels, chunkCnt), func(t *testing.T) {
- test(t, rLevel, encrypt, chunkCnt)
- })
- }
- }
- })
+
+ // First sanity check and recover a range
+ store.Record()
+ canReadRange(getter.NONE, false, true, "NONE w/o fallback CAN retrieve")
+ store.Unrecord()
+
+ // Do not forget the root chunk
+ store.Unmiss(swarm.NewAddress(addr.Bytes()[:swarm.HashSize]))
+
+ // After we forget the chunks on the way to the range, we should not be able to retrieve
+ canReadRange(getter.NONE, false, false, "NONE w/o fallback CANNOT retrieve")
+
+ // We lost a data chunk, we cannot recover using DATA only strategy with no fallback
+ canReadRange(getter.DATA, false, false, "DATA w/o fallback CANNOT retrieve")
+
+ if rLevel == 0 {
+ // Allowing fallback mode will not help if no redundancy used for upload
+ canReadRange(getter.DATA, true, false, "DATA w fallback CANNOT retrieve")
+ return
}
+
+ // Allowing fallback mode will make the range retrievable using erasure decoding
+ canReadRange(getter.DATA, true, true, "DATA w fallback CAN retrieve")
+
+ // After the reconstructed data is stored, we can retrieve the range using DATA only mode
+ canReadRange(getter.NONE, false, true, "after recovery, NONE w/o fallback CAN retrieve")
}
type chunkStore struct {
diff --git a/pkg/file/joiner/redecoder_test.go b/pkg/file/joiner/redecoder_test.go
new file mode 100644
index 00000000000..0cf3b604ec2
--- /dev/null
+++ b/pkg/file/joiner/redecoder_test.go
@@ -0,0 +1,209 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package joiner_test
+
+import (
+ "bytes"
+ "context"
+ "strconv"
+ "testing"
+
+ "github.com/ethersphere/bee/v2/pkg/cac"
+ "github.com/ethersphere/bee/v2/pkg/file/joiner"
+ "github.com/ethersphere/bee/v2/pkg/file/redundancy/getter"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storage/inmemchunkstore"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/klauspost/reedsolomon"
+)
+
+// TestReDecoderFlow tests the complete flow of:
+// 1. Loading data with redundancy getter
+// 2. Successful recovery which nulls the decoder
+// 3. Chunk eviction from cache
+// 4. Reloading the same data through ReDecoder fallback
+func TestReDecoderFlow(t *testing.T) {
+ ctx := context.Background()
+ dataShardCount := 4
+ parityShardCount := 2
+ totalShardCount := dataShardCount + parityShardCount
+
+ // Create real data chunks with proper content
+ dataShards := make([][]byte, dataShardCount)
+ for i := range dataShardCount {
+ // Create chunks with simpler test data
+ dataShards[i] = make([]byte, swarm.ChunkWithSpanSize)
+ // Create a unique string for this shard
+ testData := []byte("test-data-" + strconv.Itoa(i))
+ // Copy as much as will fit
+ copy(dataShards[i], testData)
+ }
+
+ // Create parity chunks using Reed-Solomon encoding
+ parityShards := make([][]byte, parityShardCount)
+ for i := range parityShardCount {
+ parityShards[i] = make([]byte, swarm.ChunkWithSpanSize)
+ }
+
+ // Create Reed-Solomon encoder
+ enc, err := reedsolomon.New(dataShardCount, parityShardCount)
+ if err != nil {
+ t.Fatalf("Failed to create Reed-Solomon encoder: %v", err)
+ }
+
+ // Combine data and parity shards
+ allShards := make([][]byte, totalShardCount)
+ copy(allShards, dataShards)
+ copy(allShards[dataShardCount:], parityShards)
+
+ // Encode to generate parity chunks
+ if err := enc.Encode(allShards); err != nil {
+ t.Fatalf("Failed to encode data: %v", err)
+ }
+
+ // Create content-addressed chunks for all shards
+ addresses := make([]swarm.Address, totalShardCount)
+ chunks := make([]swarm.Chunk, totalShardCount)
+
+ for i := range totalShardCount {
+ // Create proper content-addressed chunks
+ chunk, err := cac.NewWithDataSpan(allShards[i])
+ if err != nil {
+ t.Fatalf("Failed to create content-addressed chunk %d: %v", i, err)
+ }
+ chunks[i] = chunk
+ addresses[i] = chunk.Address()
+ }
+
+ // Select a data chunk to be missing (which will be recovered)
+ missingChunkIndex := 2 // The third data chunk will be missing
+ mockStore := inmemchunkstore.New()
+ netFetcher := newMockNetworkFetcher(addresses, addresses[missingChunkIndex])
+ config := getter.Config{
+ Strategy: getter.RACE,
+ Logger: log.Noop,
+ }
+
+ j := joiner.NewDecoderCache(netFetcher, mockStore, config)
+
+ // Step 1: Initializing decoder and triggering recovery
+ decoder := j.GetOrCreate(addresses, dataShardCount)
+ if decoder == nil {
+ t.Fatal("Failed to create decoder")
+ }
+
+ // Verify we can now fetch the previously missing chunk through recovery
+ recoveredChunk, err := decoder.Get(ctx, addresses[missingChunkIndex])
+ if err != nil {
+ t.Fatalf("Failed to recover missing chunk: %v", err)
+ }
+ // Verify the recovered chunk has the correct content
+ if !recoveredChunk.Address().Equal(addresses[missingChunkIndex]) {
+ t.Fatalf("Recovered chunk has incorrect address")
+ }
+
+ // Verify the recovered chunk's payload matches the original shard data
+ recoveredData := recoveredChunk.Data()
+ expectedData := chunks[missingChunkIndex].Data()
+ if len(recoveredData) != len(expectedData) {
+ t.Fatalf("Recovered chunk has incorrect data length: got %d, want %d", len(recoveredData), len(expectedData))
+ }
+ if !bytes.Equal(recoveredData, expectedData) {
+ t.Fatalf("Recovered chunk has incorrect data")
+ }
+ // Check if the recovered chunk is now in the store
+ _, err = mockStore.Get(ctx, addresses[missingChunkIndex])
+ if err != nil {
+ t.Fatalf("Recovered chunk not saved to store: %v", err)
+ }
+
+ // Step 2: The original decoder should be automatically nulled after successful recovery
+ // This is an internal state check, we can't directly test it but we can verify that
+ // we can still access the chunks
+
+ // Sanity check - verify we can still fetch chunks through the cache
+ for i := range dataShardCount {
+ _, err := decoder.Get(ctx, addresses[i])
+ if err != nil {
+ t.Fatalf("Failed to get chunk %d after recovery: %v", i, err)
+ }
+ }
+
+ // Step 3: Testing ReDecoder fallback
+ newDecoder := j.GetOrCreate(addresses, dataShardCount)
+ if newDecoder == nil {
+ t.Fatal("Failed to create ReDecoder")
+ }
+
+ // Verify all chunks can be fetched through the ReDecoder
+ for i := range dataShardCount {
+ _, err := newDecoder.Get(ctx, addresses[i])
+ if err != nil {
+ t.Fatalf("Failed to get chunk %d through ReDecoder: %v", i, err)
+ }
+ }
+
+ // Verify that we can also access the first missing chunk - now from the store
+ // This would be using the local store and not network or recovery mechanisms
+ retrievedChunk, err := newDecoder.Get(ctx, addresses[missingChunkIndex])
+ if err != nil {
+ t.Fatalf("Failed to retrieve previously recovered chunk: %v", err)
+ }
+
+ if !retrievedChunk.Address().Equal(addresses[missingChunkIndex]) {
+ t.Fatalf("Retrieved chunk has incorrect address")
+ }
+
+ // Also verify the data content matches
+ retrievedData := retrievedChunk.Data()
+ expectedData = chunks[missingChunkIndex].Data()
+ if len(retrievedData) != len(expectedData) {
+ t.Fatalf("Retrieved chunk has incorrect data length: got %d, want %d", len(retrievedData), len(expectedData))
+ }
+ if !bytes.Equal(retrievedData, expectedData) {
+ t.Fatalf("Retrieved chunk has incorrect data")
+ }
+}
+
+// Mock implementation of storage.Getter for testing
+type mockNetworkFetcher struct {
+ allAddresses []swarm.Address
+ missingAddr swarm.Address
+}
+
+// newMockNetworkFetcher creates a new mock fetcher that will return ErrNotFound for specific addresses
+func newMockNetworkFetcher(allAddrs []swarm.Address, missingAddr swarm.Address) *mockNetworkFetcher {
+ return &mockNetworkFetcher{
+ allAddresses: allAddrs,
+ missingAddr: missingAddr,
+ }
+}
+
+// Get implements storage.Getter interface
+func (m *mockNetworkFetcher) Get(ctx context.Context, addr swarm.Address) (swarm.Chunk, error) {
+ // Simulate network fetch - fail for the missing chunk
+ if addr.Equal(m.missingAddr) {
+ return nil, storage.ErrNotFound
+ }
+
+ // Find the index of this address; note index defaults to 0 if absent — callers must only pass known addresses
+ var index int
+ for i, a := range m.allAddresses {
+ if addr.Equal(a) {
+ index = i
+ break
+ }
+ }
+
+ // Generate data using the same pattern as in the test
+ data := make([]byte, swarm.ChunkWithSpanSize)
+ // Create a unique string for this shard
+ testData := []byte("test-data-" + strconv.Itoa(index))
+ // Copy as much as will fit
+ copy(data, testData)
+
+ return swarm.NewChunk(addr, data), nil
+}
diff --git a/pkg/file/pipeline/builder/builder_test.go b/pkg/file/pipeline/builder/builder_test.go
index 17091bcc0d7..5b5d4321c63 100644
--- a/pkg/file/pipeline/builder/builder_test.go
+++ b/pkg/file/pipeline/builder/builder_test.go
@@ -140,7 +140,7 @@ func BenchmarkPipeline(b *testing.B) {
100000000, // 100 mb
} {
b.Run(strconv.Itoa(count)+"-bytes", func(b *testing.B) {
- for n := 0; n < b.N; n++ {
+ for b.Loop() {
benchmarkPipeline(b, count)
}
})
diff --git a/pkg/file/pipeline/encryption/encryption_test.go b/pkg/file/pipeline/encryption/encryption_test.go
index edcfa33a46f..2010f6b8327 100644
--- a/pkg/file/pipeline/encryption/encryption_test.go
+++ b/pkg/file/pipeline/encryption/encryption_test.go
@@ -38,7 +38,7 @@ func init() {
}
}
-// TestEncyrption tests that the encryption writer works correctly.
+// TestEncryption tests that the encryption writer works correctly.
func TestEncryption(t *testing.T) {
t.Parallel()
diff --git a/pkg/file/pipeline/feeder/feeder.go b/pkg/file/pipeline/feeder/feeder.go
index 3d84f597d44..6ba722796f2 100644
--- a/pkg/file/pipeline/feeder/feeder.go
+++ b/pkg/file/pipeline/feeder/feeder.go
@@ -21,7 +21,7 @@ type chunkFeeder struct {
wrote int64
}
-// newChunkFeederWriter creates a new chunkFeeder that allows for partial
+// NewChunkFeederWriter creates a new chunkFeeder that allows for partial
// writes into the pipeline. Any pending data in the buffer is flushed to
// subsequent writers when Sum() is called.
func NewChunkFeederWriter(size int, next pipeline.ChainWriter) pipeline.Interface {
diff --git a/pkg/file/pipeline/hashtrie/hashtrie.go b/pkg/file/pipeline/hashtrie/hashtrie.go
index 7ef5eb350a1..9c2d17fb5fc 100644
--- a/pkg/file/pipeline/hashtrie/hashtrie.go
+++ b/pkg/file/pipeline/hashtrie/hashtrie.go
@@ -109,7 +109,7 @@ func (h *hashTrieWriter) writeToDataLevel(span, ref, key, data []byte) error {
return h.rParams.ChunkWrite(0, data, h.parityChunkFn)
}
-// wrapLevel wraps an existing level and writes the resulting hash to the following level
+// wrapFullLevel wraps an existing level and writes the resulting hash to the following level
// then truncates the current level data by shifting the cursors.
// Steps are performed in the following order:
// - take all of the data in the current level
@@ -198,11 +198,11 @@ func (h *hashTrieWriter) wrapFullLevel(level int) error {
func (h *hashTrieWriter) Sum() ([]byte, error) {
for i := 1; i < maxLevel; i++ {
l := h.chunkCounters[i]
- switch {
- case l == 0:
+ switch l {
+ case 0:
// level empty, continue to the next.
continue
- case l == h.maxChildrenChunks:
+ case h.maxChildrenChunks:
// this case is possible and necessary due to the carry over
// in the next switch case statement. normal writes done
// through writeToLevel will automatically wrap a full level.
@@ -211,7 +211,7 @@ func (h *hashTrieWriter) Sum() ([]byte, error) {
if err != nil {
return nil, err
}
- case l == 1:
+ case 1:
// this cursor assignment basically means:
// take the hash|span|key from this level, and append it to
// the data of the next level. you may wonder how this works:
diff --git a/pkg/file/pipeline/hashtrie/hashtrie_test.go b/pkg/file/pipeline/hashtrie/hashtrie_test.go
index edbe3c9e2e5..9f502f3fd5d 100644
--- a/pkg/file/pipeline/hashtrie/hashtrie_test.go
+++ b/pkg/file/pipeline/hashtrie/hashtrie_test.go
@@ -198,7 +198,7 @@ func TestLevels_TrieFull(t *testing.T) {
)
// to create a level wrap we need to do branching^(level-1) writes
- for i := 0; i < writes; i++ {
+ for range writes {
a := &pipeline.PipeWriteArgs{Ref: addr.Bytes(), Span: span}
err := ht.ChainWrite(a)
if err != nil {
@@ -239,7 +239,7 @@ func TestRegression(t *testing.T) {
)
binary.LittleEndian.PutUint64(span, 4096)
- for i := 0; i < writes; i++ {
+ for range writes {
a := &pipeline.PipeWriteArgs{Ref: addr.Bytes(), Span: span}
err := ht.ChainWrite(a)
if err != nil {
diff --git a/pkg/file/redundancy/getter/getter.go b/pkg/file/redundancy/getter/getter.go
index 39bf23f87c3..30ec23c00b3 100644
--- a/pkg/file/redundancy/getter/getter.go
+++ b/pkg/file/redundancy/getter/getter.go
@@ -69,12 +69,12 @@ func New(addrs []swarm.Address, shardCnt int, g storage.Getter, p storage.Putter
}
// after init, cache and wait channels are immutable, need no locking
- for i := 0; i < shardCnt; i++ {
+ for i := range shardCnt {
d.cache[addrs[i].ByteString()] = i
}
// after init, cache and wait channels are immutable, need no locking
- for i := 0; i < size; i++ {
+ for i := range size {
d.waits[i] = make(chan error)
}
@@ -340,7 +340,7 @@ func (g *decoder) setData(i int, chdata []byte) {
func (g *decoder) getData(i int) []byte {
g.mu.Lock()
defer g.mu.Unlock()
- if i == g.shardCnt-1 && g.lastLen > 0 {
+ if i == g.shardCnt-1 && g.lastLen > 0 && g.rsbuf[i] != nil {
return g.rsbuf[i][:g.lastLen] // cut padding
}
return g.rsbuf[i]
diff --git a/pkg/file/redundancy/getter/getter_test.go b/pkg/file/redundancy/getter/getter_test.go
index 42566866a3e..f3c9a27c3ff 100644
--- a/pkg/file/redundancy/getter/getter_test.go
+++ b/pkg/file/redundancy/getter/getter_test.go
@@ -266,7 +266,7 @@ func initData(t *testing.T, buf [][]byte, shardCnt int, s storage.ChunkStore) []
spanBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(spanBytes, swarm.ChunkSize)
- for i := 0; i < len(buf); i++ {
+ for i := range buf {
buf[i] = make([]byte, swarm.ChunkWithSpanSize)
if i >= shardCnt {
continue
@@ -291,7 +291,7 @@ func initData(t *testing.T, buf [][]byte, shardCnt int, s storage.ChunkStore) []
// calculate chunk addresses and upload to the store
addrs := make([]swarm.Address, len(buf))
ctx := context.TODO()
- for i := 0; i < len(buf); i++ {
+ for i := range buf {
chunk, err := cac.NewWithDataSpan(buf[i])
if err != nil {
t.Fatal(err)
@@ -313,7 +313,7 @@ func checkShardsAvailable(t *testing.T, s storage.ChunkStore, addrs []swarm.Addr
eg.Go(func() (err error) {
var delay time.Duration
var ch swarm.Chunk
- for i := 0; i < 30; i++ {
+ for i := range 30 {
select {
case <-ctx.Done():
return ctx.Err()
diff --git a/pkg/file/redundancy/getter/redecoder.go b/pkg/file/redundancy/getter/redecoder.go
new file mode 100644
index 00000000000..0338a069841
--- /dev/null
+++ b/pkg/file/redundancy/getter/redecoder.go
@@ -0,0 +1,61 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package getter
+
+import (
+ "context"
+ "errors"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// Recovery is a function that creates a recovery decoder on demand
+type Recovery func() storage.Getter
+
+// ReDecoder is a wrapper around a Getter that first attempts to fetch a chunk
+// directly from the network, falling back to recovery only when the fetch fails
+// with storage.ErrNotFound (e.g. previously recovered chunks evicted from cache).
+type ReDecoder struct {
+ fetcher storage.Getter // Direct fetcher (usually netstore)
+ recovery Recovery // Factory function to create recovery decoder on demand
+ logger log.Logger
+}
+
+// NewReDecoder creates a new ReDecoder instance with the provided fetcher and recovery factory.
+// The recovery decoder will only be created if needed (when network fetch fails).
+func NewReDecoder(fetcher storage.Getter, recovery Recovery, logger log.Logger) *ReDecoder {
+ return &ReDecoder{
+ fetcher: fetcher,
+ recovery: recovery,
+ logger: logger,
+ }
+}
+
+// Get implements the storage.Getter interface.
+// It first attempts to fetch the chunk directly from the network.
+// If that fails with ErrNotFound, it then creates the recovery decoder and attempts to recover the chunk.
+func (rd *ReDecoder) Get(ctx context.Context, addr swarm.Address) (swarm.Chunk, error) {
+ // First try to get the chunk directly from the network
+ chunk, err := rd.fetcher.Get(ctx, addr)
+ if err == nil {
+ return chunk, nil
+ }
+
+ // Only attempt recovery if the chunk was not found
+ if !errors.Is(err, storage.ErrNotFound) {
+ return nil, err
+ }
+
+ // Log that we're falling back to recovery
+ rd.logger.Debug("chunk not found in network, creating recovery decoder", "address", addr)
+
+ // Create the recovery decoder on demand
+ recovery := rd.recovery()
+
+ // Attempt to recover the chunk
+ return recovery.Get(ctx, addr)
+}
diff --git a/pkg/file/redundancy/level.go b/pkg/file/redundancy/level.go
index 411da15ec98..c4eeacddc9f 100644
--- a/pkg/file/redundancy/level.go
+++ b/pkg/file/redundancy/level.go
@@ -29,6 +29,11 @@ const (
PARANOID
)
+// Validate validates the redundancy level
+func (l Level) Validate() bool {
+ return l >= NONE && l <= PARANOID
+}
+
// GetParities returns number of parities based on appendix F table 5
func (l Level) GetParities(shards int) int {
et, err := l.getErasureTable()
diff --git a/pkg/file/span.go b/pkg/file/span.go
index b8366233bd2..681cafb4acb 100644
--- a/pkg/file/span.go
+++ b/pkg/file/span.go
@@ -22,7 +22,7 @@ func GenerateSpanSizes(levels, branches int) []int64 {
spans := make([]int64, levels)
branchesSixtyfour := int64(branches)
var span int64 = 1
- for i := 0; i < 9; i++ {
+ for i := range 9 {
spans[i] = span
span *= branchesSixtyfour
}
diff --git a/pkg/file/splitter/internal/job_test.go b/pkg/file/splitter/internal/job_test.go
index bc7945c4b42..98a44ce3f51 100644
--- a/pkg/file/splitter/internal/job_test.go
+++ b/pkg/file/splitter/internal/job_test.go
@@ -29,8 +29,7 @@ func TestSplitterJobPartialSingleChunk(t *testing.T) {
store := inmemchunkstore.New()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
data := []byte("foo")
j := internal.NewSimpleSplitterJob(ctx, store, int64(len(data)), false)
@@ -83,10 +82,7 @@ func testSplitterJobVector(t *testing.T) {
j := internal.NewSimpleSplitterJob(ctx, store, int64(len(data)), false)
for i := 0; i < len(data); i += swarm.ChunkSize {
- l := swarm.ChunkSize
- if len(data)-i < swarm.ChunkSize {
- l = len(data) - i
- }
+ l := min(len(data)-i, swarm.ChunkSize)
c, err := j.Write(data[i : i+l])
if err != nil {
t.Fatal(err)
diff --git a/pkg/file/splitter/splitter_test.go b/pkg/file/splitter/splitter_test.go
index 04958f5dd82..c54991f9579 100644
--- a/pkg/file/splitter/splitter_test.go
+++ b/pkg/file/splitter/splitter_test.go
@@ -223,7 +223,7 @@ func BenchmarkSplitter(b *testing.B) {
100000000, // 100 mb
} {
b.Run(strconv.Itoa(count)+"-bytes", func(b *testing.B) {
- for n := 0; n < b.N; n++ {
+ for b.Loop() {
benchmarkSplitter(b, count)
}
})
diff --git a/pkg/file/utils.go b/pkg/file/utils.go
index ce1f94d372a..b0e3071f98d 100644
--- a/pkg/file/utils.go
+++ b/pkg/file/utils.go
@@ -66,10 +66,7 @@ func ReferenceCount(span uint64, level redundancy.Level, encrytedChunk bool) (in
)
// search for branch level big enough to include span
branchLevel := 1
- for {
- if branchSize >= span {
- break
- }
+ for branchSize < span {
branchSize *= branching
branchLevel++
}
diff --git a/pkg/gsoc/gsoc.go b/pkg/gsoc/gsoc.go
index 6497c9b0c63..464681258e2 100644
--- a/pkg/gsoc/gsoc.go
+++ b/pkg/gsoc/gsoc.go
@@ -50,7 +50,7 @@ func (l *listener) Subscribe(address swarm.Address, handler Handler) (cleanup fu
defer l.handlersMu.Unlock()
h := l.handlers[address.ByteString()]
- for i := 0; i < len(h); i++ {
+ for i := range h {
if h[i] == &handler {
l.handlers[address.ByteString()] = append(h[:i], h[i+1:]...)
return
diff --git a/pkg/gsoc/gsoc_test.go b/pkg/gsoc/gsoc_test.go
index dfbe8e03a5c..dc49b0809a8 100644
--- a/pkg/gsoc/gsoc_test.go
+++ b/pkg/gsoc/gsoc_test.go
@@ -114,7 +114,7 @@ func ensureCalls(t *testing.T, calls *int, exp int) {
func waitHandlerCallback(t *testing.T, msgChan *chan struct{}, count int) {
t.Helper()
- for received := 0; received < count; received++ {
+ for range count {
select {
case <-*msgChan:
case <-time.After(1 * time.Second):
diff --git a/pkg/hive/hive.go b/pkg/hive/hive.go
index 27858cdcf87..8f8572bcb34 100644
--- a/pkg/hive/hive.go
+++ b/pkg/hive/hive.go
@@ -12,7 +12,6 @@ package hive
import (
"context"
- "encoding/hex"
"errors"
"fmt"
"sync"
@@ -36,13 +35,11 @@ import (
const loggerName = "hive"
const (
- protocolName = "hive"
- protocolVersion = "1.1.0"
- peersStreamName = "peers"
- messageTimeout = 1 * time.Minute // maximum allowed time for a message to be read or written.
- maxBatchSize = 30
- pingTimeout = time.Second * 15 // time to wait for ping to succeed
- batchValidationTimeout = 5 * time.Minute // prevent lock contention on peer validation
+ protocolName = "hive"
+ protocolVersion = "1.1.0"
+ peersStreamName = "peers"
+ messageTimeout = 1 * time.Minute // maximum allowed time for a message to be read or written.
+ maxBatchSize = 30
)
var (
@@ -53,7 +50,7 @@ var (
)
type Service struct {
- streamer p2p.StreamerPinger
+ streamer p2p.Bee260CompatibilityStreamer
addressBook addressbook.GetPutter
addPeersHandler func(...swarm.Address)
networkID uint64
@@ -67,9 +64,10 @@ type Service struct {
sem *semaphore.Weighted
bootnode bool
allowPrivateCIDRs bool
+ overlay swarm.Address
}
-func New(streamer p2p.StreamerPinger, addressbook addressbook.GetPutter, networkID uint64, bootnode bool, allowPrivateCIDRs bool, logger log.Logger) *Service {
+func New(streamer p2p.Bee260CompatibilityStreamer, addressbook addressbook.GetPutter, networkID uint64, bootnode bool, allowPrivateCIDRs bool, overlay swarm.Address, logger log.Logger) *Service {
svc := &Service{
streamer: streamer,
logger: logger.WithName(loggerName).Register(),
@@ -83,6 +81,7 @@ func New(streamer p2p.StreamerPinger, addressbook addressbook.GetPutter, network
sem: semaphore.NewWeighted(int64(swarm.MaxBins)),
bootnode: bootnode,
allowPrivateCIDRs: allowPrivateCIDRs,
+ overlay: overlay,
}
if !bootnode {
@@ -177,6 +176,11 @@ func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swa
w, _ := protobuf.NewWriterAndReader(stream)
var peersRequest pb.Peers
for _, p := range peers {
+ if p.Equal(s.overlay) {
+ s.logger.Debug("skipping self-address in broadcast", "overlay", p.String())
+ continue
+ }
+
addr, err := s.addressBook.Get(p)
if err != nil {
if errors.Is(err, addressbook.ErrNotFound) {
@@ -186,13 +190,17 @@ func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swa
return err
}
- if !s.allowPrivateCIDRs && manet.IsPrivateAddr(addr.Underlay) {
- continue // Don't advertise private CIDRs to the public network.
+ advertisableUnderlays := s.filterAdvertisableUnderlays(addr.Underlays)
+ if len(advertisableUnderlays) == 0 {
+ s.logger.Debug("skipping peers: no advertisable underlays", "peer_address", p)
+ continue
}
+ advertisableUnderlays = p2p.FilterBee260CompatibleUnderlays(s.streamer.IsBee260(peer), advertisableUnderlays)
+
peersRequest.Peers = append(peersRequest.Peers, &pb.BzzAddress{
Overlay: addr.Overlay.Bytes(),
- Underlay: addr.Underlay.Bytes(),
+ Underlay: bzz.SerializeUnderlays(advertisableUnderlays),
Signature: addr.Signature,
Nonce: addr.Nonce,
})
@@ -249,111 +257,90 @@ func (s *Service) disconnect(peer p2p.Peer) error {
func (s *Service) startCheckPeersHandler() {
ctx, cancel := context.WithCancel(context.Background())
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
+ s.wg.Go(func() {
<-s.quit
cancel()
- }()
+ })
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
+ s.wg.Go(func() {
for {
select {
case <-ctx.Done():
return
case newPeers := <-s.peersChan:
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- cctx, cancel := context.WithTimeout(ctx, batchValidationTimeout)
- defer cancel()
- s.checkAndAddPeers(cctx, newPeers)
- }()
+ s.wg.Go(func() {
+ s.checkAndAddPeers(newPeers)
+ })
}
}
- }()
+ })
}
-func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) {
- var peersToAdd []swarm.Address
- mtx := sync.Mutex{}
- wg := sync.WaitGroup{}
-
- addPeer := func(newPeer *pb.BzzAddress, multiUnderlay ma.Multiaddr) {
- err := s.sem.Acquire(ctx, 1)
- if err != nil {
- return
- }
-
- wg.Add(1)
- go func() {
- s.metrics.PeerConnectAttempts.Inc()
-
- defer func() {
- s.sem.Release(1)
- wg.Done()
- }()
-
- ctx, cancel := context.WithTimeout(ctx, pingTimeout)
- defer cancel()
-
- start := time.Now()
-
- // check if the underlay is usable by doing a raw ping using libp2p
- if _, err := s.streamer.Ping(ctx, multiUnderlay); err != nil {
- s.metrics.PingFailureTime.Observe(time.Since(start).Seconds())
- s.metrics.UnreachablePeers.Inc()
- s.logger.Debug("unreachable peer underlay", "peer_address", hex.EncodeToString(newPeer.Overlay), "underlay", multiUnderlay)
- return
- }
- s.metrics.PingTime.Observe(time.Since(start).Seconds())
-
- s.metrics.ReachablePeers.Inc()
-
- bzzAddress := bzz.Address{
- Overlay: swarm.NewAddress(newPeer.Overlay),
- Underlay: multiUnderlay,
- Signature: newPeer.Signature,
- Nonce: newPeer.Nonce,
- }
-
- err := s.addressBook.Put(bzzAddress.Overlay, bzzAddress)
- if err != nil {
- s.metrics.StorePeerErr.Inc()
- s.logger.Warning("skipping peer in response", "peer_address", newPeer.String(), "error", err)
- return
- }
-
- mtx.Lock()
- peersToAdd = append(peersToAdd, bzzAddress.Overlay)
- mtx.Unlock()
- }()
- }
+func (s *Service) checkAndAddPeers(peers pb.Peers) {
+ peersToAdd := make([]swarm.Address, 0, len(peers.Peers))
for _, p := range peers.Peers {
-
- multiUnderlay, err := ma.NewMultiaddrBytes(p.Underlay)
+ multiUnderlays, err := bzz.DeserializeUnderlays(p.Underlay)
if err != nil {
s.metrics.PeerUnderlayErr.Inc()
s.logger.Debug("multi address underlay", "error", err)
continue
}
+ if len(multiUnderlays) == 0 {
+ s.logger.Debug("check and add peers, no underlays", "overlay", swarm.NewAddress(p.Overlay).String())
+ continue // no underlays sent
+ }
+
// if peer exists already in the addressBook
// and if the underlays match, skip
addr, err := s.addressBook.Get(swarm.NewAddress(p.Overlay))
- if err == nil && addr.Underlay.Equal(multiUnderlay) {
+ if err == nil && bzz.AreUnderlaysEqual(addr.Underlays, multiUnderlays) {
+ continue
+ }
+
+ overlayAddr := swarm.NewAddress(p.Overlay)
+
+ if overlayAddr.Equal(s.overlay) {
+ s.logger.Debug("skipping self-address in peer list", "overlay", overlayAddr.String())
continue
}
- // add peer does not exist in the addressbook
- addPeer(p, multiUnderlay)
+ bzzAddress := bzz.Address{
+ Overlay: overlayAddr,
+ Underlays: multiUnderlays,
+ Signature: p.Signature,
+ Nonce: p.Nonce,
+ }
+
+ if err := s.addressBook.Put(bzzAddress.Overlay, bzzAddress); err != nil {
+ s.metrics.StorePeerErr.Inc()
+ s.logger.Warning("put peer in addressbook", "peer_address", p.String(), "error", err)
+ continue
+ }
+
+ peersToAdd = append(peersToAdd, bzzAddress.Overlay)
}
- wg.Wait()
if s.addPeersHandler != nil && len(peersToAdd) > 0 {
s.addPeersHandler(peersToAdd...)
}
}
+
+// filterAdvertisableUnderlays returns underlay addresses that can be advertised
+// based on the allowPrivateCIDRs setting. If allowPrivateCIDRs is false,
+// only public addresses are returned.
+func (s *Service) filterAdvertisableUnderlays(underlays []ma.Multiaddr) []ma.Multiaddr {
+ if s.allowPrivateCIDRs {
+ return underlays
+ }
+
+ var publicUnderlays []ma.Multiaddr
+ for _, u := range underlays {
+ if !manet.IsPrivateAddr(u) {
+ publicUnderlays = append(publicUnderlays, u)
+ }
+ }
+
+ return publicUnderlays
+}
diff --git a/pkg/hive/hive_test.go b/pkg/hive/hive_test.go
index fdb903cf7e5..ec603c8bb2b 100644
--- a/pkg/hive/hive_test.go
+++ b/pkg/hive/hive_test.go
@@ -7,7 +7,7 @@ package hive_test
import (
"bytes"
"context"
- "errors"
+ "encoding/hex"
"fmt"
"strconv"
"testing"
@@ -47,13 +47,12 @@ func TestHandlerRateLimit(t *testing.T) {
addressbookclean := ab.New(mock.NewStateStore())
- // new recorder for handling Ping
+ // new recorder
streamer := streamtest.New()
// create a hive server that handles the incoming stream
- server := hive.New(streamer, addressbookclean, networkID, false, true, logger)
- testutil.CleanupCloser(t, server)
-
serverAddress := swarm.RandAddress(t)
+ server := hive.New(streamer, addressbookclean, networkID, false, true, serverAddress, logger)
+ testutil.CleanupCloser(t, server)
// setup the stream recorder to record stream data
serverRecorder := streamtest.New(
@@ -63,8 +62,12 @@ func TestHandlerRateLimit(t *testing.T) {
peers := make([]swarm.Address, hive.LimitBurst+1)
for i := range peers {
+ underlay1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/" + strconv.Itoa(i))
+ if err != nil {
+ t.Fatal(err)
+ }
- underlay, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/" + strconv.Itoa(i))
+ underlay2, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/" + strconv.Itoa(i))
if err != nil {
t.Fatal(err)
}
@@ -77,7 +80,7 @@ func TestHandlerRateLimit(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- bzzAddr, err := bzz.NewAddress(signer, underlay, overlay, networkID, nonce)
+ bzzAddr, err := bzz.NewAddress(signer, []ma.Multiaddr{underlay1, underlay2}, overlay, networkID, nonce)
if err != nil {
t.Fatal(err)
}
@@ -90,7 +93,8 @@ func TestHandlerRateLimit(t *testing.T) {
}
// create a hive client that will do broadcast
- client := hive.New(serverRecorder, addressbook, networkID, false, true, logger)
+ clientAddress := swarm.RandAddress(t)
+ client := hive.New(serverRecorder, addressbook, networkID, false, true, clientAddress, logger)
err := client.BroadcastPeers(context.Background(), serverAddress, peers...)
if err != nil {
t.Fatal(err)
@@ -108,7 +112,6 @@ func TestHandlerRateLimit(t *testing.T) {
t.Fatal("want nil error")
}
}
-
func TestBroadcastPeers(t *testing.T) {
t.Parallel()
@@ -119,23 +122,40 @@ func TestBroadcastPeers(t *testing.T) {
// populate all expected and needed random resources for 2 full batches
// tests cases that uses fewer resources can use sub-slices of this data
- var bzzAddresses []bzz.Address
- var overlays []swarm.Address
- var wantMsgs []pb.Peers
+ bzzAddresses := make([]bzz.Address, 0, 2*hive.MaxBatchSize)
+ overlays := make([]swarm.Address, 0, 2*hive.MaxBatchSize)
+ wantMsgs := make([]pb.Peers, 0, 2*hive.MaxBatchSize)
- for i := 0; i < 2; i++ {
+ for range 2 {
wantMsgs = append(wantMsgs, pb.Peers{Peers: []*pb.BzzAddress{}})
}
+ last := 2*hive.MaxBatchSize - 1
+
for i := 0; i < 2*hive.MaxBatchSize; i++ {
- base := "/ip4/127.0.0.1/udp/"
- if i == 2*hive.MaxBatchSize-1 {
- base = "/ip4/1.1.1.1/udp/" // The last underlay has public address.
- }
- underlay, err := ma.NewMultiaddr(base + strconv.Itoa(i))
- if err != nil {
- t.Fatal(err)
+ var underlays []ma.Multiaddr
+ if i == last {
+ u, err := ma.NewMultiaddr("/ip4/1.1.1.1/udp/" + strconv.Itoa(i)) // public
+ if err != nil {
+ t.Fatal(err)
+ }
+ u2, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/" + strconv.Itoa(i))
+ if err != nil {
+ t.Fatal(err)
+ }
+ underlays = []ma.Multiaddr{u, u2}
+ } else {
+ n := (i % 3) + 1
+ for j := 0; j < n; j++ {
+ port := i + j*10000
+ u, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/" + strconv.Itoa(port))
+ if err != nil {
+ t.Fatal(err)
+ }
+ underlays = append(underlays, u)
+ }
}
+
pk, err := crypto.GenerateSecp256k1Key()
if err != nil {
t.Fatal(err)
@@ -145,21 +165,20 @@ func TestBroadcastPeers(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- bzzAddr, err := bzz.NewAddress(signer, underlay, overlay, networkID, nonce)
+ bzzAddr, err := bzz.NewAddress(signer, underlays, overlay, networkID, nonce)
if err != nil {
t.Fatal(err)
}
bzzAddresses = append(bzzAddresses, *bzzAddr)
overlays = append(overlays, bzzAddr.Overlay)
- err = addressbook.Put(bzzAddr.Overlay, *bzzAddr)
- if err != nil {
+ if err := addressbook.Put(bzzAddr.Overlay, *bzzAddr); err != nil {
t.Fatal(err)
}
wantMsgs[i/hive.MaxBatchSize].Peers = append(wantMsgs[i/hive.MaxBatchSize].Peers, &pb.BzzAddress{
Overlay: bzzAddresses[i].Overlay.Bytes(),
- Underlay: bzzAddresses[i].Underlay.Bytes(),
+ Underlay: bzz.SerializeUnderlays(bzzAddresses[i].Underlays),
Signature: bzzAddresses[i].Signature,
Nonce: nonce,
})
@@ -172,7 +191,6 @@ func TestBroadcastPeers(t *testing.T) {
wantOverlays []swarm.Address
wantBzzAddresses []bzz.Address
allowPrivateCIDRs bool
- pingErr func(addr ma.Multiaddr) (time.Duration, error)
}{
"OK - single record": {
addresee: swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c"),
@@ -214,23 +232,7 @@ func TestBroadcastPeers(t *testing.T) {
wantBzzAddresses: bzzAddresses[:2*hive.MaxBatchSize],
allowPrivateCIDRs: true,
},
- "OK - single batch - skip ping failures": {
- addresee: swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c"),
- peers: overlays[:15],
- wantMsgs: []pb.Peers{{Peers: wantMsgs[0].Peers[:15]}},
- wantOverlays: overlays[:10],
- wantBzzAddresses: bzzAddresses[:10],
- allowPrivateCIDRs: true,
- pingErr: func(addr ma.Multiaddr) (rtt time.Duration, err error) {
- for _, v := range bzzAddresses[10:15] {
- if v.Underlay.Equal(addr) {
- return rtt, errors.New("ping failure")
- }
- }
- return rtt, nil
- },
- },
- "Ok - don't advertise private CIDRs": {
+ "Ok - don't advertise private CIDRs only": {
addresee: overlays[len(overlays)-1],
peers: overlays[:15],
wantMsgs: []pb.Peers{{}},
@@ -238,6 +240,29 @@ func TestBroadcastPeers(t *testing.T) {
wantBzzAddresses: nil,
allowPrivateCIDRs: false,
},
+ "Ok - don't advertise private CIDRs only (but include one public peer)": {
+ addresee: overlays[0],
+ peers: overlays[58:],
+ wantMsgs: []pb.Peers{{Peers: []*pb.BzzAddress{
+ {
+ Overlay: bzzAddresses[len(bzzAddresses)-1].Overlay.Bytes(),
+ Underlay: bzz.SerializeUnderlays([]ma.Multiaddr{bzzAddresses[len(bzzAddresses)-1].Underlays[0]}),
+ Signature: bzzAddresses[len(bzzAddresses)-1].Signature,
+ Nonce: nonce,
+ },
+ }}},
+ wantOverlays: []swarm.Address{overlays[len(overlays)-1]},
+ wantBzzAddresses: []bzz.Address{
+ {
+ Underlays: []ma.Multiaddr{bzzAddresses[len(bzzAddresses)-1].Underlays[0]},
+ Overlay: bzzAddresses[len(bzzAddresses)-1].Overlay,
+ Signature: bzzAddresses[len(bzzAddresses)-1].Signature,
+ Nonce: bzzAddresses[len(bzzAddresses)-1].Nonce,
+ EthereumAddress: bzzAddresses[len(bzzAddresses)-1].EthereumAddress,
+ },
+ },
+ allowPrivateCIDRs: false,
+ },
}
for name, tc := range testCases {
@@ -246,15 +271,10 @@ func TestBroadcastPeers(t *testing.T) {
addressbookclean := ab.New(mock.NewStateStore())
- // new recorder for handling Ping
- var streamer *streamtest.Recorder
- if tc.pingErr != nil {
- streamer = streamtest.New(streamtest.WithPingErr(tc.pingErr))
- } else {
- streamer = streamtest.New()
- }
+ streamer := streamtest.New()
// create a hive server that handles the incoming stream
- server := hive.New(streamer, addressbookclean, networkID, false, true, logger)
+ serverAddress := swarm.RandAddress(t)
+ server := hive.New(streamer, addressbookclean, networkID, false, true, serverAddress, logger)
testutil.CleanupCloser(t, server)
// setup the stream recorder to record stream data
@@ -263,7 +283,9 @@ func TestBroadcastPeers(t *testing.T) {
)
// create a hive client that will do broadcast
- client := hive.New(recorder, addressbook, networkID, false, tc.allowPrivateCIDRs, logger)
+ clientAddress := swarm.RandAddress(t)
+ client := hive.New(recorder, addressbook, networkID, false, tc.allowPrivateCIDRs, clientAddress, logger)
+
if err := client.BroadcastPeers(context.Background(), tc.addresee, tc.peers...); err != nil {
t.Fatal(err)
}
@@ -284,10 +306,7 @@ func TestBroadcastPeers(t *testing.T) {
if err != nil {
t.Fatal(err)
}
-
- if fmt.Sprint(messages[0]) != fmt.Sprint(tc.wantMsgs[i]) {
- t.Errorf("Messages got %v, want %v", messages, tc.wantMsgs)
- }
+ comparePeerMsgs(t, messages[0].Peers, tc.wantMsgs[i].Peers)
}
expectOverlaysEventually(t, addressbookclean, tc.wantOverlays)
@@ -380,3 +399,269 @@ func readAndAssertPeersMsgs(in []byte, expectedLen int) ([]pb.Peers, error) {
return peers, nil
}
+
+func comparePeerMsgs(t *testing.T, got, want []*pb.BzzAddress) {
+ t.Helper()
+
+ gotMap := flattenByOverlay(t, got)
+ wantMap := flattenByOverlay(t, want)
+
+ for ovlHex, w := range wantMap {
+ g, ok := gotMap[ovlHex]
+ if !ok {
+ t.Fatalf("expected peer %s, but not found", ovlHex)
+ }
+ if !bytes.Equal(g.Underlay, w.Underlay) {
+ t.Fatalf("peer %s: underlays (got=%s want=%s)", ovlHex, g.Underlay, w.Underlay)
+ }
+ if !bytes.Equal(g.Signature, w.Signature) {
+ t.Fatalf("peer %s: expected signatures (got=%s want=%s)",
+ ovlHex, shortHex(g.Signature), shortHex(w.Signature))
+ }
+ if !bytes.Equal(g.Nonce, w.Nonce) {
+ t.Fatalf("peer %s: expected nonce (got=%s want=%s)",
+ ovlHex, shortHex(g.Nonce), shortHex(w.Nonce))
+ }
+ }
+}
+
+func flattenByOverlay(t *testing.T, batches []*pb.BzzAddress) map[string]*pb.BzzAddress {
+ t.Helper()
+
+ m := make(map[string]*pb.BzzAddress)
+ for _, batch := range batches {
+ overlay := hex.EncodeToString(batch.Overlay)
+ if _, ok := m[overlay]; ok {
+ t.Fatalf("multiple bzz addresses with the same overlay address: %s", overlay)
+ }
+ m[overlay] = batch
+ }
+ return m
+}
+
+func shortHex(b []byte) string {
+ s := hex.EncodeToString(b)
+ if len(s) > 32 {
+ return s[:32] + fmt.Sprintf("…(%dB)", len(b))
+ }
+ return s
+}
+
+// TestBroadcastPeersSkipsSelf verifies that hive does not broadcast its own
+// address to other peers, preventing self-connection attempts.
+func TestBroadcastPeersSkipsSelf(t *testing.T) {
+ t.Parallel()
+
+ logger := log.Noop
+ statestore := mock.NewStateStore()
+ addressbook := ab.New(statestore)
+ networkID := uint64(1)
+ addressbookclean := ab.New(mock.NewStateStore())
+
+ // Create addresses
+ serverAddress := swarm.RandAddress(t)
+ clientAddress := swarm.RandAddress(t)
+
+ // Create a peer address
+ peer1 := swarm.RandAddress(t)
+ underlay1, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1234")
+ if err != nil {
+ t.Fatal(err)
+ }
+ pk, err := crypto.GenerateSecp256k1Key()
+ if err != nil {
+ t.Fatal(err)
+ }
+ signer := crypto.NewDefaultSigner(pk)
+ overlay1, err := crypto.NewOverlayAddress(pk.PublicKey, networkID, block)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bzzAddr1, err := bzz.NewAddress(signer, []ma.Multiaddr{underlay1}, overlay1, networkID, nonce)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := addressbook.Put(bzzAddr1.Overlay, *bzzAddr1); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create self address entry in addressbook
+ underlayClient, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/9999")
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkClient, err := crypto.GenerateSecp256k1Key()
+ if err != nil {
+ t.Fatal(err)
+ }
+ signerClient := crypto.NewDefaultSigner(pkClient)
+ bzzAddrClient, err := bzz.NewAddress(signerClient, []ma.Multiaddr{underlayClient}, clientAddress, networkID, nonce)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := addressbook.Put(clientAddress, *bzzAddrClient); err != nil {
+ t.Fatal(err)
+ }
+
+ // Setup server
+ streamer := streamtest.New()
+ server := hive.New(streamer, addressbookclean, networkID, false, true, serverAddress, logger)
+ testutil.CleanupCloser(t, server)
+
+ serverRecorder := streamtest.New(
+ streamtest.WithProtocols(server.Protocol()),
+ )
+
+ // Setup client
+ client := hive.New(serverRecorder, addressbook, networkID, false, true, clientAddress, logger)
+ testutil.CleanupCloser(t, client)
+
+ // Try to broadcast: peer1, clientAddress (self), and another peer
+ peersIncludingSelf := []swarm.Address{bzzAddr1.Overlay, clientAddress, peer1}
+
+ err = client.BroadcastPeers(context.Background(), serverAddress, peersIncludingSelf...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Get records
+ records, err := serverRecorder.Records(serverAddress, "hive", "1.1.0", "peers")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(records) == 0 {
+ t.Fatal("expected at least one record")
+ }
+
+ // Read the messages
+ messages, err := readAndAssertPeersMsgs(records[0].In(), 1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify that clientAddress (self) was NOT included in broadcast
+ for _, peerMsg := range messages[0].Peers {
+ receivedOverlay := swarm.NewAddress(peerMsg.Overlay)
+ if receivedOverlay.Equal(clientAddress) {
+ t.Fatal("self address should not be broadcast to peers")
+ }
+ }
+
+ // Verify server addressbook eventually contains only the valid peers, not self
+ err = spinlock.Wait(spinTimeout, func() bool {
+ overlays, err := addressbookclean.Overlays()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Should only have bzzAddr1, not clientAddress
+ for _, o := range overlays {
+ if o.Equal(clientAddress) {
+ return false // self should not be in addressbook
+ }
+ }
+ return true
+ })
+ if err != nil {
+ t.Fatal("self address found in server addressbook")
+ }
+}
+
+// TestReceivePeersSkipsSelf verifies that hive does not add its own address
+// to the addressbook when receiving peer lists from other peers.
+func TestReceivePeersSkipsSelf(t *testing.T) {
+ t.Parallel()
+
+ logger := log.Noop
+ statestore := mock.NewStateStore()
+ addressbook := ab.New(statestore)
+ networkID := uint64(1)
+ addressbookclean := ab.New(mock.NewStateStore())
+
+ // Create addresses
+ serverAddress := swarm.RandAddress(t)
+ clientAddress := swarm.RandAddress(t)
+
+ // Create a valid peer
+ underlay1, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1234")
+ if err != nil {
+ t.Fatal(err)
+ }
+ pk1, err := crypto.GenerateSecp256k1Key()
+ if err != nil {
+ t.Fatal(err)
+ }
+ signer1 := crypto.NewDefaultSigner(pk1)
+ overlay1, err := crypto.NewOverlayAddress(pk1.PublicKey, networkID, block)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bzzAddr1, err := bzz.NewAddress(signer1, []ma.Multiaddr{underlay1}, overlay1, networkID, nonce)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := addressbook.Put(bzzAddr1.Overlay, *bzzAddr1); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create self address entry (serverAddress) that will be sent by client
+ underlayServer, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/8888")
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkServer, err := crypto.GenerateSecp256k1Key()
+ if err != nil {
+ t.Fatal(err)
+ }
+ signerServer := crypto.NewDefaultSigner(pkServer)
+ bzzAddrServer, err := bzz.NewAddress(signerServer, []ma.Multiaddr{underlayServer}, serverAddress, networkID, nonce)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Add server's own address to client's addressbook (so client can send it)
+ if err := addressbook.Put(serverAddress, *bzzAddrServer); err != nil {
+ t.Fatal(err)
+ }
+
+ // Setup server that will receive peers including its own address
+ streamer := streamtest.New()
+ server := hive.New(streamer, addressbookclean, networkID, false, true, serverAddress, logger)
+ testutil.CleanupCloser(t, server)
+
+ serverRecorder := streamtest.New(
+ streamtest.WithProtocols(server.Protocol()),
+ )
+
+ // Setup client
+ client := hive.New(serverRecorder, addressbook, networkID, false, true, clientAddress, logger)
+ testutil.CleanupCloser(t, client)
+
+ // Client broadcasts: valid peer and server's own address
+ peersIncludingSelf := []swarm.Address{bzzAddr1.Overlay, serverAddress}
+
+ err = client.BroadcastPeers(context.Background(), serverAddress, peersIncludingSelf...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Wait a bit for server to process
+ time.Sleep(100 * time.Millisecond)
+
+ // Verify server's addressbook does NOT contain its own address
+ overlays, err := addressbookclean.Overlays()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, o := range overlays {
+ if o.Equal(serverAddress) {
+ t.Fatal("server should not add its own address to addressbook when received from peer")
+ }
+ }
+
+ // Verify server does have the valid peer
+ _, err = addressbookclean.Get(bzzAddr1.Overlay)
+ if err != nil {
+ t.Fatalf("expected server to have valid peer in addressbook, got error: %v", err)
+ }
+}
diff --git a/pkg/jsonhttp/jsonhttp.go b/pkg/jsonhttp/jsonhttp.go
index d9806dca523..e555223e110 100644
--- a/pkg/jsonhttp/jsonhttp.go
+++ b/pkg/jsonhttp/jsonhttp.go
@@ -44,7 +44,7 @@ type StatusResponse struct {
}
// Respond writes a JSON-encoded body to http.ResponseWriter.
-func Respond(w http.ResponseWriter, statusCode int, response interface{}) {
+func Respond(w http.ResponseWriter, statusCode int, response any) {
if statusCode == 0 {
statusCode = http.StatusOK
}
@@ -88,32 +88,32 @@ func Respond(w http.ResponseWriter, statusCode int, response interface{}) {
}
// Continue writes a response with status code 100.
-func Continue(w http.ResponseWriter, response interface{}) {
+func Continue(w http.ResponseWriter, response any) {
Respond(w, http.StatusContinue, response)
}
// SwitchingProtocols writes a response with status code 101.
-func SwitchingProtocols(w http.ResponseWriter, response interface{}) {
+func SwitchingProtocols(w http.ResponseWriter, response any) {
Respond(w, http.StatusSwitchingProtocols, response)
}
// OK writes a response with status code 200.
-func OK(w http.ResponseWriter, response interface{}) {
+func OK(w http.ResponseWriter, response any) {
Respond(w, http.StatusOK, response)
}
// Created writes a response with status code 201.
-func Created(w http.ResponseWriter, response interface{}) {
+func Created(w http.ResponseWriter, response any) {
Respond(w, http.StatusCreated, response)
}
// Accepted writes a response with status code 202.
-func Accepted(w http.ResponseWriter, response interface{}) {
+func Accepted(w http.ResponseWriter, response any) {
Respond(w, http.StatusAccepted, response)
}
// NonAuthoritativeInfo writes a response with status code 203.
-func NonAuthoritativeInfo(w http.ResponseWriter, response interface{}) {
+func NonAuthoritativeInfo(w http.ResponseWriter, response any) {
Respond(w, http.StatusNonAuthoritativeInfo, response)
}
@@ -125,206 +125,206 @@ func NoContent(w http.ResponseWriter) {
}
// ResetContent writes a response with status code 205.
-func ResetContent(w http.ResponseWriter, response interface{}) {
+func ResetContent(w http.ResponseWriter, response any) {
Respond(w, http.StatusResetContent, response)
}
// PartialContent writes a response with status code 206.
-func PartialContent(w http.ResponseWriter, response interface{}) {
+func PartialContent(w http.ResponseWriter, response any) {
Respond(w, http.StatusPartialContent, response)
}
// MultipleChoices writes a response with status code 300.
-func MultipleChoices(w http.ResponseWriter, response interface{}) {
+func MultipleChoices(w http.ResponseWriter, response any) {
Respond(w, http.StatusMultipleChoices, response)
}
// MovedPermanently writes a response with status code 301.
-func MovedPermanently(w http.ResponseWriter, response interface{}) {
+func MovedPermanently(w http.ResponseWriter, response any) {
Respond(w, http.StatusMovedPermanently, response)
}
// Found writes a response with status code 302.
-func Found(w http.ResponseWriter, response interface{}) {
+func Found(w http.ResponseWriter, response any) {
Respond(w, http.StatusFound, response)
}
// SeeOther writes a response with status code 303.
-func SeeOther(w http.ResponseWriter, response interface{}) {
+func SeeOther(w http.ResponseWriter, response any) {
Respond(w, http.StatusSeeOther, response)
}
// NotModified writes a response with status code 304.
-func NotModified(w http.ResponseWriter, response interface{}) {
+func NotModified(w http.ResponseWriter, response any) {
Respond(w, http.StatusNotModified, response)
}
// UseProxy writes a response with status code 305.
-func UseProxy(w http.ResponseWriter, response interface{}) {
+func UseProxy(w http.ResponseWriter, response any) {
Respond(w, http.StatusUseProxy, response)
}
// TemporaryRedirect writes a response with status code 307.
-func TemporaryRedirect(w http.ResponseWriter, response interface{}) {
+func TemporaryRedirect(w http.ResponseWriter, response any) {
Respond(w, http.StatusTemporaryRedirect, response)
}
// PermanentRedirect writes a response with status code 308.
-func PermanentRedirect(w http.ResponseWriter, response interface{}) {
+func PermanentRedirect(w http.ResponseWriter, response any) {
Respond(w, http.StatusPermanentRedirect, response)
}
// BadRequest writes a response with status code 400.
-func BadRequest(w http.ResponseWriter, response interface{}) {
+func BadRequest(w http.ResponseWriter, response any) {
Respond(w, http.StatusBadRequest, response)
}
// Unauthorized writes a response with status code 401.
-func Unauthorized(w http.ResponseWriter, response interface{}) {
+func Unauthorized(w http.ResponseWriter, response any) {
Respond(w, http.StatusUnauthorized, response)
}
// PaymentRequired writes a response with status code 402.
-func PaymentRequired(w http.ResponseWriter, response interface{}) {
+func PaymentRequired(w http.ResponseWriter, response any) {
Respond(w, http.StatusPaymentRequired, response)
}
// Forbidden writes a response with status code 403.
-func Forbidden(w http.ResponseWriter, response interface{}) {
+func Forbidden(w http.ResponseWriter, response any) {
Respond(w, http.StatusForbidden, response)
}
// NotFound writes a response with status code 404.
-func NotFound(w http.ResponseWriter, response interface{}) {
+func NotFound(w http.ResponseWriter, response any) {
Respond(w, http.StatusNotFound, response)
}
// MethodNotAllowed writes a response with status code 405.
-func MethodNotAllowed(w http.ResponseWriter, response interface{}) {
+func MethodNotAllowed(w http.ResponseWriter, response any) {
Respond(w, http.StatusMethodNotAllowed, response)
}
// NotAcceptable writes a response with status code 406.
-func NotAcceptable(w http.ResponseWriter, response interface{}) {
+func NotAcceptable(w http.ResponseWriter, response any) {
Respond(w, http.StatusNotAcceptable, response)
}
// ProxyAuthRequired writes a response with status code 407.
-func ProxyAuthRequired(w http.ResponseWriter, response interface{}) {
+func ProxyAuthRequired(w http.ResponseWriter, response any) {
Respond(w, http.StatusProxyAuthRequired, response)
}
// RequestTimeout writes a response with status code 408.
-func RequestTimeout(w http.ResponseWriter, response interface{}) {
+func RequestTimeout(w http.ResponseWriter, response any) {
Respond(w, http.StatusRequestTimeout, response)
}
// Conflict writes a response with status code 409.
-func Conflict(w http.ResponseWriter, response interface{}) {
+func Conflict(w http.ResponseWriter, response any) {
Respond(w, http.StatusConflict, response)
}
// Gone writes a response with status code 410.
-func Gone(w http.ResponseWriter, response interface{}) {
+func Gone(w http.ResponseWriter, response any) {
Respond(w, http.StatusGone, response)
}
// LengthRequired writes a response with status code 411.
-func LengthRequired(w http.ResponseWriter, response interface{}) {
+func LengthRequired(w http.ResponseWriter, response any) {
Respond(w, http.StatusLengthRequired, response)
}
// PreconditionFailed writes a response with status code 412.
-func PreconditionFailed(w http.ResponseWriter, response interface{}) {
+func PreconditionFailed(w http.ResponseWriter, response any) {
Respond(w, http.StatusPreconditionFailed, response)
}
// RequestEntityTooLarge writes a response with status code 413.
-func RequestEntityTooLarge(w http.ResponseWriter, response interface{}) {
+func RequestEntityTooLarge(w http.ResponseWriter, response any) {
Respond(w, http.StatusRequestEntityTooLarge, response)
}
// RequestURITooLong writes a response with status code 414.
-func RequestURITooLong(w http.ResponseWriter, response interface{}) {
+func RequestURITooLong(w http.ResponseWriter, response any) {
Respond(w, http.StatusRequestURITooLong, response)
}
// UnsupportedMediaType writes a response with status code 415.
-func UnsupportedMediaType(w http.ResponseWriter, response interface{}) {
+func UnsupportedMediaType(w http.ResponseWriter, response any) {
Respond(w, http.StatusUnsupportedMediaType, response)
}
// RequestedRangeNotSatisfiable writes a response with status code 416.
-func RequestedRangeNotSatisfiable(w http.ResponseWriter, response interface{}) {
+func RequestedRangeNotSatisfiable(w http.ResponseWriter, response any) {
Respond(w, http.StatusRequestedRangeNotSatisfiable, response)
}
// ExpectationFailed writes a response with status code 417.
-func ExpectationFailed(w http.ResponseWriter, response interface{}) {
+func ExpectationFailed(w http.ResponseWriter, response any) {
Respond(w, http.StatusExpectationFailed, response)
}
// Teapot writes a response with status code 418.
-func Teapot(w http.ResponseWriter, response interface{}) {
+func Teapot(w http.ResponseWriter, response any) {
Respond(w, http.StatusTeapot, response)
}
// UnprocessableEntity writes a response with status code 422.
-func UnprocessableEntity(w http.ResponseWriter, response interface{}) {
+func UnprocessableEntity(w http.ResponseWriter, response any) {
Respond(w, http.StatusUnprocessableEntity, response)
}
// UpgradeRequired writes a response with status code 426.
-func UpgradeRequired(w http.ResponseWriter, response interface{}) {
+func UpgradeRequired(w http.ResponseWriter, response any) {
Respond(w, http.StatusUpgradeRequired, response)
}
// PreconditionRequired writes a response with status code 428.
-func PreconditionRequired(w http.ResponseWriter, response interface{}) {
+func PreconditionRequired(w http.ResponseWriter, response any) {
Respond(w, http.StatusPreconditionRequired, response)
}
// TooManyRequests writes a response with status code 429.
-func TooManyRequests(w http.ResponseWriter, response interface{}) {
+func TooManyRequests(w http.ResponseWriter, response any) {
Respond(w, http.StatusTooManyRequests, response)
}
// RequestHeaderFieldsTooLarge writes a response with status code 431.
-func RequestHeaderFieldsTooLarge(w http.ResponseWriter, response interface{}) {
+func RequestHeaderFieldsTooLarge(w http.ResponseWriter, response any) {
Respond(w, http.StatusRequestHeaderFieldsTooLarge, response)
}
// UnavailableForLegalReasons writes a response with status code 451.
-func UnavailableForLegalReasons(w http.ResponseWriter, response interface{}) {
+func UnavailableForLegalReasons(w http.ResponseWriter, response any) {
Respond(w, http.StatusUnavailableForLegalReasons, response)
}
// InternalServerError writes a response with status code 500.
-func InternalServerError(w http.ResponseWriter, response interface{}) {
+func InternalServerError(w http.ResponseWriter, response any) {
Respond(w, http.StatusInternalServerError, response)
}
// NotImplemented writes a response with status code 501.
-func NotImplemented(w http.ResponseWriter, response interface{}) {
+func NotImplemented(w http.ResponseWriter, response any) {
Respond(w, http.StatusNotImplemented, response)
}
// BadGateway writes a response with status code 502.
-func BadGateway(w http.ResponseWriter, response interface{}) {
+func BadGateway(w http.ResponseWriter, response any) {
Respond(w, http.StatusBadGateway, response)
}
// ServiceUnavailable writes a response with status code 503.
-func ServiceUnavailable(w http.ResponseWriter, response interface{}) {
+func ServiceUnavailable(w http.ResponseWriter, response any) {
Respond(w, http.StatusServiceUnavailable, response)
}
// GatewayTimeout writes a response with status code 504.
-func GatewayTimeout(w http.ResponseWriter, response interface{}) {
+func GatewayTimeout(w http.ResponseWriter, response any) {
Respond(w, http.StatusGatewayTimeout, response)
}
// HTTPVersionNotSupported writes a response with status code 505.
-func HTTPVersionNotSupported(w http.ResponseWriter, response interface{}) {
+func HTTPVersionNotSupported(w http.ResponseWriter, response any) {
Respond(w, http.StatusHTTPVersionNotSupported, response)
}
diff --git a/pkg/jsonhttp/jsonhttp_test.go b/pkg/jsonhttp/jsonhttp_test.go
index 61a17684e5e..43b7b90b5ad 100644
--- a/pkg/jsonhttp/jsonhttp_test.go
+++ b/pkg/jsonhttp/jsonhttp_test.go
@@ -134,7 +134,7 @@ func TestRespond_special(t *testing.T) {
for _, tc := range []struct {
name string
code int
- response interface{}
+ response any
wantMessage string
}{
{
@@ -245,7 +245,7 @@ func TestStandardHTTPResponds(t *testing.T) {
t.Parallel()
for _, tc := range []struct {
- f func(w http.ResponseWriter, response interface{})
+ f func(w http.ResponseWriter, response any)
code int
}{
{f: jsonhttp.Continue, code: http.StatusContinue},
diff --git a/pkg/jsonhttp/jsonhttptest/jsonhttptest.go b/pkg/jsonhttp/jsonhttptest/jsonhttptest.go
index 1c436ec9373..9e64b0a8422 100644
--- a/pkg/jsonhttp/jsonhttptest/jsonhttptest.go
+++ b/pkg/jsonhttp/jsonhttptest/jsonhttptest.go
@@ -36,14 +36,15 @@ func Request(tb testing.TB, client *http.Client, method, url string, responseCod
}
}
- req, err := http.NewRequest(method, url, o.requestBody)
+ ctx := o.ctx
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ req, err := http.NewRequestWithContext(ctx, method, url, o.requestBody)
if err != nil {
tb.Fatal(err)
}
req.Header = o.requestHeaders
- if o.ctx != nil {
- req = req.WithContext(o.ctx)
- }
resp, err := client.Do(req)
if err != nil {
tb.Fatal(err)
@@ -156,7 +157,7 @@ func WithRequestBody(body io.Reader) Option {
// WithJSONRequestBody writes a request JSON-encoded body to the request made by
// the Request function.
-func WithJSONRequestBody(r interface{}) Option {
+func WithJSONRequestBody(r any) Option {
return optionFunc(func(o *options) error {
b, err := json.Marshal(r)
if err != nil {
@@ -255,7 +256,7 @@ func WithNonEmptyResponseHeader(key string) Option {
// WithExpectedJSONResponse validates that the response from the request in the
// Request function matches JSON-encoded body provided here.
-func WithExpectedJSONResponse(response interface{}) Option {
+func WithExpectedJSONResponse(response any) Option {
return optionFunc(func(o *options) error {
o.expectedJSONResponse = response
return nil
@@ -264,7 +265,7 @@ func WithExpectedJSONResponse(response interface{}) Option {
// WithUnmarshalJSONResponse unmarshals response body from the request in the
// Request function to the provided response. Response must be a pointer.
-func WithUnmarshalJSONResponse(response interface{}) Option {
+func WithUnmarshalJSONResponse(response any) Option {
return optionFunc(func(o *options) error {
o.unmarshalResponse = response
return nil
@@ -303,8 +304,8 @@ type options struct {
expectedResponseHeaders http.Header
nonEmptyResponseHeaders []string
expectedResponse []byte
- expectedJSONResponse interface{}
- unmarshalResponse interface{}
+ expectedJSONResponse any
+ unmarshalResponse any
responseBody *[]byte
noResponseBody bool
}
diff --git a/pkg/jsonhttp/jsonhttptest/testing_mock_test.go b/pkg/jsonhttp/jsonhttptest/testing_mock_test.go
index c496f13d8ae..0d375a926c3 100644
--- a/pkg/jsonhttp/jsonhttptest/testing_mock_test.go
+++ b/pkg/jsonhttp/jsonhttptest/testing_mock_test.go
@@ -62,11 +62,11 @@ func (m *mock) Helper() {
m.isHelper = true
}
-func (m *mock) Errorf(format string, args ...interface{}) {
+func (m *mock) Errorf(format string, args ...any) {
m.got.errors = append(m.got.errors, fmt.Sprintf(format, args...))
}
-func (m *mock) Fatal(args ...interface{}) {
+func (m *mock) Fatal(args ...any) {
m.got.fatal = fmt.Sprint(args...)
panic(errFailed) // terminate the goroutine to detect it in the assert function
}
diff --git a/pkg/keystore/file/key.go b/pkg/keystore/file/key.go
index dcbe22612f5..5844b0dd1ef 100644
--- a/pkg/keystore/file/key.go
+++ b/pkg/keystore/file/key.go
@@ -75,7 +75,7 @@ func encryptKey(k *ecdsa.PrivateKey, password string, edg keystore.EDG) ([]byte,
return nil, err
}
var addr []byte
- switch k.PublicKey.Curve {
+ switch k.Curve {
case btcec.S256():
a, err := crypto.NewEthereumAddress(k.PublicKey)
if err != nil {
@@ -89,7 +89,7 @@ func encryptKey(k *ecdsa.PrivateKey, password string, edg keystore.EDG) ([]byte,
}
addr = privKey.PublicKey().Bytes()
default:
- return nil, fmt.Errorf("unsupported curve: %v", k.PublicKey.Curve)
+ return nil, fmt.Errorf("unsupported curve: %v", k.Curve)
}
return json.Marshal(encryptedKey{
Address: hex.EncodeToString(addr),
diff --git a/pkg/log/formatter.go b/pkg/log/formatter.go
index bf0a71a9a7e..558e445376a 100644
--- a/pkg/log/formatter.go
+++ b/pkg/log/formatter.go
@@ -45,12 +45,12 @@ type Marshaler interface {
// with exported fields
//
// It may return any value of any type.
- MarshalLog() interface{}
+ MarshalLog() any
}
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
// E.g.: PseudoStruct{"f1", 1, "f2", true, "f3", []int{}}.
-type PseudoStruct []interface{}
+type PseudoStruct []any
// fmtOptions carries parameters which influence the way logs are generated/formatted.
type fmtOptions struct {
@@ -97,7 +97,7 @@ type formatter struct {
// render produces a log line where the base is
// never escaped; the opposite is true for args.
-func (f *formatter) render(base, args []interface{}) []byte {
+func (f *formatter) render(base, args []any) []byte {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.opts.jsonOutput {
buf.WriteByte('{')
@@ -116,7 +116,7 @@ func (f *formatter) render(base, args []interface{}) []byte {
// separator (which depends on the output format) before the first pair is
// written. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
-func (f *formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) {
+func (f *formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
@@ -159,7 +159,7 @@ func (f *formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing
// prettyWithFlags prettifies the given value.
// TODO: This is not fast. Most of the overhead goes here.
-func (f *formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+func (f *formatter) prettyWithFlags(value any, flags uint32, depth int) string {
const flagRawStruct = 0x1 // Do not print braces on structs.
if depth > f.opts.maxLogDepth {
@@ -387,12 +387,12 @@ func (f *formatter) caller() caller {
}
// nonStringKey converts non-string value v to string.
-func (f *formatter) nonStringKey(v interface{}) string {
+func (f *formatter) nonStringKey(v any) string {
return fmt.Sprintf("", f.snippet(v))
}
// snippet produces a short snippet string of an arbitrary value.
-func (f *formatter) snippet(v interface{}) string {
+func (f *formatter) snippet(v any) string {
const snipLen = 16
snip := f.prettyWithFlags(v, 0, 0)
@@ -405,7 +405,7 @@ func (f *formatter) snippet(v interface{}) string {
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
-func (f *formatter) sanitize(kvList []interface{}) []interface{} {
+func (f *formatter) sanitize(kvList []any) []any {
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
@@ -466,7 +466,7 @@ func needsEscape(s string) bool {
}
// invokeMarshaler returns panic-safe output from the Marshaler.MarshalLog() method.
-func invokeMarshaler(m Marshaler) (ret interface{}) {
+func invokeMarshaler(m Marshaler) (ret any) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("", r)
diff --git a/pkg/log/formatter_test.go b/pkg/log/formatter_test.go
index 7dd44b87104..c78cce38453 100644
--- a/pkg/log/formatter_test.go
+++ b/pkg/log/formatter_test.go
@@ -36,7 +36,7 @@ func (p pointErr) MarshalText() ([]byte, error) {
// marshalerTest expect to result in the MarshalLog() value when logged.
type marshalerTest struct{ val string }
-func (marshalerTest) MarshalLog() interface{} {
+func (marshalerTest) MarshalLog() any {
return struct{ Inner string }{"I am a log.Marshaler"}
}
func (marshalerTest) String() string {
@@ -50,7 +50,7 @@ func (marshalerTest) Error() string {
// marshalerPanicTest expect this to result in a panic when logged.
type marshalerPanicTest struct{ val string }
-func (marshalerPanicTest) MarshalLog() interface{} {
+func (marshalerPanicTest) MarshalLog() any {
panic("marshalerPanicTest")
}
@@ -207,9 +207,9 @@ type (
Inner1Test `json:"inner1"`
Inner2Test `json:"-"`
Inner3Test `json:"-,"`
- Inner4Test `json:"inner4,omitempty"`
+ Inner4Test `json:"inner4"`
Inner5Test `json:","`
- Inner6Test `json:"inner6,omitempty"`
+ Inner6Test `json:"inner6"`
}
)
@@ -218,7 +218,7 @@ func TestPretty(t *testing.T) {
strPtr := func(s string) *string { return &s }
testCases := []struct {
- val interface{}
+ val any
exp string // used in testCases where JSON can't handle it
}{{
val: "strval",
@@ -369,11 +369,11 @@ func TestPretty(t *testing.T) {
val: struct {
A *int
B *int
- C interface{}
- D interface{}
+ C any
+ D any
}{
B: intPtr(1),
- D: interface{}(2),
+ D: any(2),
},
}, {
val: marshalerTest{"foobar"},
@@ -650,13 +650,13 @@ func TestPretty(t *testing.T) {
}
}
-func makeKV(args ...interface{}) []interface{} { return args }
+func makeKV(args ...any) []any { return args }
func TestRender(t *testing.T) {
testCases := []struct {
name string
- builtins []interface{}
- args []interface{}
+ builtins []any
+ args []any
wantKV string
wantJSON string
}{{
@@ -665,8 +665,8 @@ func TestRender(t *testing.T) {
wantJSON: "{}",
}, {
name: "empty",
- builtins: []interface{}{},
- args: []interface{}{},
+ builtins: []any{},
+ args: []any{},
wantKV: "",
wantJSON: "{}",
}, {
@@ -739,12 +739,12 @@ func TestRender(t *testing.T) {
func TestSanitize(t *testing.T) {
testCases := []struct {
name string
- kv []interface{}
- want []interface{}
+ kv []any
+ want []any
}{{
name: "empty",
- kv: []interface{}{},
- want: []interface{}{},
+ kv: []any{},
+ want: []any{},
}, {
name: "already sane",
kv: makeKV("int", 1, "str", "ABC", "bool", true),
diff --git a/pkg/log/httpaccess/http_access.go b/pkg/log/httpaccess/http_access.go
index 668b449813a..fbc350dba60 100644
--- a/pkg/log/httpaccess/http_access.go
+++ b/pkg/log/httpaccess/http_access.go
@@ -58,7 +58,7 @@ func NewHTTPAccessLogHandler(logger log.Logger, tracer *tracing.Tracer, message
ip = r.RemoteAddr
}
- fields := []interface{}{
+ fields := []any{
"ip", ip,
"method", r.Method,
"host", r.Host,
diff --git a/pkg/log/log.go b/pkg/log/log.go
index 5a517d02a33..0efa22c6127 100644
--- a/pkg/log/log.go
+++ b/pkg/log/log.go
@@ -116,7 +116,7 @@ type Builder interface {
// WithValues specifies additional key/value pairs
// to be logged with each log line.
- WithValues(keysAndValues ...interface{}) Builder
+ WithValues(keysAndValues ...any) Builder
// Build returns a new or existing Logger
// instance, if such instance already exists.
@@ -143,28 +143,28 @@ type Logger interface {
// the log line. The key/value pairs can then be used to add additional
// variable information. The key/value pairs must alternate string keys
// and arbitrary values.
- Debug(msg string, keysAndValues ...interface{})
+ Debug(msg string, keysAndValues ...any)
// Info logs an info message with the given key/value pairs as context.
// The msg argument should be used to add some constant description to
// the log line. The key/value pairs can then be used to add additional
// variable information. The key/value pairs must alternate string keys
// and arbitrary values.
- Info(msg string, keysAndValues ...interface{})
+ Info(msg string, keysAndValues ...any)
// Warning logs a warning message with the given key/value pairs as context.
// The msg argument should be used to add some constant description to
// the log line. The key/value pairs can then be used to add additional
// variable information. The key/value pairs must alternate string keys
// and arbitrary values.
- Warning(msg string, keysAndValues ...interface{})
+ Warning(msg string, keysAndValues ...any)
// Error logs an error, with the given message and key/value pairs as context.
// The msg argument should be used to add context to any underlying error,
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
- Error(err error, msg string, keysAndValues ...interface{})
+ Error(err error, msg string, keysAndValues ...any)
}
// Lock wraps io.Writer in a mutex to make it safe for concurrent use.
diff --git a/pkg/log/logger.go b/pkg/log/logger.go
index f40ffd59916..bb81d327172 100644
--- a/pkg/log/logger.go
+++ b/pkg/log/logger.go
@@ -58,7 +58,7 @@ type builder struct {
// values holds additional key/value pairs
// that are included on every log call.
- values []interface{}
+ values []any
// valuesStr is a cache of render values slice, so
// we don't have to render them on each Build call.
@@ -83,7 +83,7 @@ func (b *builder) WithName(name string) Builder {
}
// WithValues implements the Builder interface WithValues method.
-func (b *builder) WithValues(keysAndValues ...interface{}) Builder {
+func (b *builder) WithValues(keysAndValues ...any) Builder {
c := b.clone()
c.values = append(c.values, keysAndValues...)
return c
@@ -133,7 +133,7 @@ func (b *builder) clone() *builder {
c := *b
c.cloned = true
c.names = append(make([]string, 0, len(c.names)), c.names...)
- c.values = append(make([]interface{}, 0, len(c.values)), c.values...)
+ c.values = append(make([]any, 0, len(c.values)), c.values...)
return &c
}
@@ -176,7 +176,7 @@ func (l *logger) Verbosity() Level {
}
// Debug implements the Logger interface Debug method.
-func (l *logger) Debug(msg string, keysAndValues ...interface{}) {
+func (l *logger) Debug(msg string, keysAndValues ...any) {
if int(l.verbosity.get()) >= int(l.v) {
if err := l.log(VerbosityDebug, CategoryDebug, nil, msg, keysAndValues...); err != nil {
fmt.Fprintln(os.Stderr, err)
@@ -185,7 +185,7 @@ func (l *logger) Debug(msg string, keysAndValues ...interface{}) {
}
// Info implements the Logger interface Info method.
-func (l *logger) Info(msg string, keysAndValues ...interface{}) {
+func (l *logger) Info(msg string, keysAndValues ...any) {
if l.verbosity.get() >= VerbosityInfo {
if err := l.log(VerbosityInfo, CategoryInfo, nil, msg, keysAndValues...); err != nil {
fmt.Fprintln(os.Stderr, err)
@@ -194,7 +194,7 @@ func (l *logger) Info(msg string, keysAndValues ...interface{}) {
}
// Warning implements the Logger interface Warning method.
-func (l *logger) Warning(msg string, keysAndValues ...interface{}) {
+func (l *logger) Warning(msg string, keysAndValues ...any) {
if l.verbosity.get() >= VerbosityWarning {
if err := l.log(VerbosityWarning, CategoryWarning, nil, msg, keysAndValues...); err != nil {
fmt.Fprintln(os.Stderr, err)
@@ -203,7 +203,7 @@ func (l *logger) Warning(msg string, keysAndValues ...interface{}) {
}
// Error implements the Logger interface Error method.
-func (l *logger) Error(err error, msg string, keysAndValues ...interface{}) {
+func (l *logger) Error(err error, msg string, keysAndValues ...any) {
if l.verbosity.get() >= VerbosityError {
if err := l.log(VerbosityError, CategoryError, err, msg, keysAndValues...); err != nil {
fmt.Fprintln(os.Stderr, err)
@@ -218,8 +218,8 @@ func (l *logger) setVerbosity(v Level) {
// log logs the given msg and key-value pairs with the given level
// and the given message category caller (if enabled) to the sink.
-func (l *logger) log(vl Level, mc MessageCategory, err error, msg string, keysAndValues ...interface{}) error {
- base := make([]interface{}, 0, 14+len(keysAndValues))
+func (l *logger) log(vl Level, mc MessageCategory, err error, msg string, keysAndValues ...any) error {
+ base := make([]any, 0, 14+len(keysAndValues))
if l.formatter.opts.logTimestamp {
base = append(base, "time", time.Now().Format(l.formatter.opts.timestampLayout))
}
diff --git a/pkg/log/logger_test.go b/pkg/log/logger_test.go
index f7c23308808..e3921b60a84 100644
--- a/pkg/log/logger_test.go
+++ b/pkg/log/logger_test.go
@@ -30,8 +30,8 @@ func (h *hook) Fire(Level) error {
}
// applyError is a higher order function that returns the given fn with an applied err.
-func applyError(fn func(error, string, ...interface{}), err error) func(string, ...interface{}) {
- return func(msg string, kvs ...interface{}) {
+func applyError(fn func(error, string, ...any), err error) func(string, ...any) {
+ return func(msg string, kvs ...any) {
fn(err, msg, kvs...)
}
}
@@ -73,7 +73,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) {
logger, _ := newLogger(WithLevelHooks(VerbosityNone, &have))
tests := []struct {
- fn func(string, ...interface{})
+ fn func(string, ...any)
want bool
}{{
fn: logger.Build().Debug,
@@ -102,7 +102,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) {
logger, _ := newLogger(WithLevelHooks(VerbosityDebug, &have))
tests := []struct {
- fn func(string, ...interface{})
+ fn func(string, ...any)
want bool
}{{
fn: logger.Build().Debug,
@@ -131,7 +131,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) {
logger, _ := newLogger(WithLevelHooks(VerbosityInfo, &have))
tests := []struct {
- fn func(string, ...interface{})
+ fn func(string, ...any)
want bool
}{{
fn: logger.Build().Debug,
@@ -160,7 +160,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) {
logger, _ := newLogger(WithLevelHooks(VerbosityWarning, &have))
tests := []struct {
- fn func(string, ...interface{})
+ fn func(string, ...any)
want bool
}{{
fn: logger.Build().Debug,
@@ -189,7 +189,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) {
logger, _ := newLogger(WithLevelHooks(VerbosityError, &have))
tests := []struct {
- fn func(string, ...interface{})
+ fn func(string, ...any)
want bool
}{{
fn: logger.Build().Debug,
@@ -218,7 +218,7 @@ func TestLoggerOptionsLevelHooks(t *testing.T) {
logger, _ := newLogger(WithLevelHooks(VerbosityAll, &have))
tests := []struct {
- fn func(string, ...interface{})
+ fn func(string, ...any)
want bool
}{{
fn: logger.Build().Debug,
@@ -275,8 +275,8 @@ func TestLogger(t *testing.T) {
testCases := []struct {
name string
- logFn func(string, ...interface{})
- args []interface{}
+ logFn func(string, ...any)
+ args []any
want string
}{{
name: "just msg",
@@ -626,8 +626,8 @@ func TestLoggerWithName(t *testing.T) {
testCases := []struct {
name string
- logFn func(string, ...interface{})
- args []interface{}
+ logFn func(string, ...any)
+ args []any
want string
}{{
name: "one",
@@ -694,8 +694,8 @@ func TestLoggerWithValues(t *testing.T) {
testCases := []struct {
name string
- logFn func(string, ...interface{})
- args []interface{}
+ logFn func(string, ...any)
+ args []any
want string
}{{
name: "zero",
diff --git a/pkg/log/noop_logger.go b/pkg/log/noop_logger.go
index 590a717d722..96fbddefd34 100644
--- a/pkg/log/noop_logger.go
+++ b/pkg/log/noop_logger.go
@@ -9,14 +9,14 @@ var Noop Logger = new(noopLogger)
type noopLogger struct{}
-func (nl *noopLogger) V(_ uint) Builder { return nl }
-func (nl *noopLogger) WithName(_ string) Builder { return nl }
-func (nl *noopLogger) WithValues(_ ...interface{}) Builder { return nl }
-func (nl *noopLogger) Build() Logger { return nl }
-func (nl *noopLogger) Register() Logger { return nl }
+func (nl *noopLogger) V(_ uint) Builder { return nl }
+func (nl *noopLogger) WithName(_ string) Builder { return nl }
+func (nl *noopLogger) WithValues(_ ...any) Builder { return nl }
+func (nl *noopLogger) Build() Logger { return nl }
+func (nl *noopLogger) Register() Logger { return nl }
-func (nl *noopLogger) Verbosity() Level { return VerbosityNone }
-func (nl *noopLogger) Debug(_ string, _ ...interface{}) {}
-func (nl *noopLogger) Info(_ string, _ ...interface{}) {}
-func (nl *noopLogger) Warning(_ string, _ ...interface{}) {}
-func (nl *noopLogger) Error(_ error, _ string, _ ...interface{}) {}
+func (nl *noopLogger) Verbosity() Level { return VerbosityNone }
+func (nl *noopLogger) Debug(_ string, _ ...any) {}
+func (nl *noopLogger) Info(_ string, _ ...any) {}
+func (nl *noopLogger) Warning(_ string, _ ...any) {}
+func (nl *noopLogger) Error(_ error, _ string, _ ...any) {}
diff --git a/pkg/log/registry.go b/pkg/log/registry.go
index a467cfc39cf..bd7fa3587c6 100644
--- a/pkg/log/registry.go
+++ b/pkg/log/registry.go
@@ -124,7 +124,7 @@ func SetVerbosityByExp(e string, v Level) error {
}
var merr *multierror.Error
- loggers.Range(func(key, val interface{}) bool {
+ loggers.Range(func(key, val any) bool {
if rex.MatchString(key.(string)) {
merr = multierror.Append(merr, SetVerbosity(val.(*logger), v))
}
@@ -135,7 +135,7 @@ func SetVerbosityByExp(e string, v Level) error {
// RegistryIterate iterates through all registered loggers.
func RegistryIterate(fn func(id, path string, verbosity Level, v uint) (next bool)) {
- loggers.Range(func(_, val interface{}) bool {
+ loggers.Range(func(_, val any) bool {
l := val.(*logger)
return fn(l.id, l.namesStr, l.verbosity.get(), l.v)
})
diff --git a/pkg/log/registry_test.go b/pkg/log/registry_test.go
index dbfb1410d84..e83ac0bc588 100644
--- a/pkg/log/registry_test.go
+++ b/pkg/log/registry_test.go
@@ -75,10 +75,10 @@ func TestNewLogger(t *testing.T) {
var (
cnt int
- val interface{}
+ val any
)
NewLogger("root").Register()
- loggers.Range(func(k, v interface{}) bool {
+ loggers.Range(func(k, v any) bool {
cnt++
val = v
return true
@@ -119,7 +119,7 @@ func TestSetVerbosity(t *testing.T) {
NewLogger("root").WithName("child1").WithValues("abc", 123).Register()
registered := make(map[string]*logger)
- loggers.Range(func(k, v interface{}) bool {
+ loggers.Range(func(k, v any) bool {
registered[k.(string)] = v.(*logger)
return true
})
@@ -185,7 +185,7 @@ func TestRegistryRange(t *testing.T) {
NewLogger("root").WithName("child1").WithValues("abc", 123).Register()
registered := make(map[string]*logger)
- loggers.Range(func(k, v interface{}) bool {
+ loggers.Range(func(k, v any) bool {
registered[k.(string)] = v.(*logger)
return true
})
diff --git a/pkg/manifest/mantaray/marshal.go b/pkg/manifest/mantaray/marshal.go
index 4e0324c35dd..d95282135ad 100644
--- a/pkg/manifest/mantaray/marshal.go
+++ b/pkg/manifest/mantaray/marshal.go
@@ -12,6 +12,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math"
)
const (
@@ -162,10 +163,7 @@ func (n *Node) MarshalBinary() (bytes []byte, err error) {
copy(xorEncryptedBytes, bytes[0:nodeObfuscationKeySize])
for i := nodeObfuscationKeySize; i < len(bytes); i += nodeObfuscationKeySize {
- end := i + nodeObfuscationKeySize
- if end > len(bytes) {
- end = len(bytes)
- }
+ end := min(i+nodeObfuscationKeySize, len(bytes))
encrypted := encryptDecrypt(bytes[i:end], n.obfuscationKey)
copy(xorEncryptedBytes[i:end], encrypted)
@@ -192,11 +190,6 @@ func (bb *bitsForBytes) set(b byte) {
bb.bits[b/8] |= 1 << (b % 8)
}
-//nolint:unused
-func (bb *bitsForBytes) get(b byte) bool {
- return bb.getUint8(b)
-}
-
func (bb *bitsForBytes) getUint8(i uint8) bool {
return (bb.bits[i/8]>>(i%8))&1 > 0
}
@@ -228,10 +221,7 @@ func (n *Node) UnmarshalBinary(data []byte) error {
copy(xorDecryptedBytes, data[0:nodeObfuscationKeySize])
for i := nodeObfuscationKeySize; i < len(data); i += nodeObfuscationKeySize {
- end := i + nodeObfuscationKeySize
- if end > len(data) {
- end = len(data)
- }
+ end := min(i+nodeObfuscationKeySize, len(data))
decrypted := encryptDecrypt(data[i:end], n.obfuscationKey)
copy(xorDecryptedBytes[i:end], decrypted)
@@ -347,7 +337,7 @@ func (f *fork) fromBytes(b []byte) error {
f.prefix = b[nodeForkHeaderSize : nodeForkHeaderSize+prefixLen]
f.Node = NewNodeRef(b[nodeForkPreReferenceSize:])
- f.Node.nodeType = nodeType
+ f.nodeType = nodeType
return nil
}
@@ -362,7 +352,7 @@ func (f *fork) fromBytes02(b []byte, refBytesSize, metadataBytesSize int) error
f.prefix = b[nodeForkHeaderSize : nodeForkHeaderSize+prefixLen]
f.Node = NewNodeRef(b[nodeForkPreReferenceSize : nodeForkPreReferenceSize+refBytesSize])
- f.Node.nodeType = nodeType
+ f.nodeType = nodeType
if metadataBytesSize > 0 {
metadataBytes := b[nodeForkPreReferenceSize+refBytesSize+nodeForkMetadataBytesSize:]
@@ -374,7 +364,7 @@ func (f *fork) fromBytes02(b []byte, refBytesSize, metadataBytesSize int) error
return err
}
- f.Node.metadata = metadata
+ f.metadata = metadata
}
return nil
@@ -387,7 +377,7 @@ func (f *fork) bytes() (b []byte, err error) {
err = fmt.Errorf("node reference size > 256: %d", len(r))
return
}
- b = append(b, f.Node.nodeType, uint8(len(f.prefix)))
+ b = append(b, f.nodeType, uint8(len(f.prefix)))
prefixBytes := make([]byte, nodePrefixMaxSize)
copy(prefixBytes, f.prefix)
@@ -397,13 +387,16 @@ func (f *fork) bytes() (b []byte, err error) {
copy(refBytes, r)
b = append(b, refBytes...)
- if f.Node.IsWithMetadataType() {
+ if f.IsWithMetadataType() {
// using JSON encoding for metadata
- metadataJSONBytes, err1 := json.Marshal(f.Node.metadata)
+ metadataJSONBytes, err1 := json.Marshal(f.metadata)
if err1 != nil {
return b, err1
}
+ if len(metadataJSONBytes) > math.MaxInt-nodeForkMetadataBytesSize {
+ return nil, fmt.Errorf("metadata size overflow: %d", len(metadataJSONBytes))
+ }
metadataJSONBytesSizeWithSize := len(metadataJSONBytes) + nodeForkMetadataBytesSize
// pad JSON bytes if necessary
@@ -441,7 +434,7 @@ func (f *fork) bytes() (b []byte, err error) {
var refBytes = nodeRefBytes
func nodeRefBytes(f *fork) []byte {
- return f.Node.ref
+ return f.ref
}
// encryptDecrypt runs a XOR encryption on the input bytes, encrypting it if it
@@ -449,7 +442,7 @@ func nodeRefBytes(f *fork) []byte {
func encryptDecrypt(input, key []byte) []byte {
output := make([]byte, len(input))
- for i := 0; i < len(input); i++ {
+ for i := range input {
output[i] = input[i] ^ key[i%len(key)]
}
diff --git a/pkg/manifest/mantaray/marshal_test.go b/pkg/manifest/mantaray/marshal_test.go
index e0d728602fa..8d0cb827b84 100644
--- a/pkg/manifest/mantaray/marshal_test.go
+++ b/pkg/manifest/mantaray/marshal_test.go
@@ -177,7 +177,7 @@ func TestMarshal(t *testing.T) {
i++
return b
}
- for i := 0; i < len(testEntries); i++ {
+ for i := range testEntries {
c := testEntries[i].Path
e := testEntries[i].Entry
if len(e) == 0 {
diff --git a/pkg/manifest/mantaray/node.go b/pkg/manifest/mantaray/node.go
index 2e2754f200b..865a89a29eb 100644
--- a/pkg/manifest/mantaray/node.go
+++ b/pkg/manifest/mantaray/node.go
@@ -105,25 +105,10 @@ func (n *Node) makeWithMetadata() {
n.nodeType = n.nodeType | nodeTypeWithMetadata
}
-//nolint:unused
-func (n *Node) makeNotValue() {
- n.nodeType = (nodeTypeMask ^ nodeTypeValue) & n.nodeType
-}
-
-//nolint:unused
-func (n *Node) makeNotEdge() {
- n.nodeType = (nodeTypeMask ^ nodeTypeEdge) & n.nodeType
-}
-
func (n *Node) makeNotWithPathSeparator() {
n.nodeType = (nodeTypeMask ^ nodeTypeWithPathSeparator) & n.nodeType
}
-//nolint:unused
-func (n *Node) makeNotWithMetadata() {
- n.nodeType = (nodeTypeMask ^ nodeTypeWithMetadata) & n.nodeType
-}
-
func (n *Node) SetObfuscationKey(obfuscationKey []byte) {
bytes := make([]byte, 32)
copy(bytes, obfuscationKey)
@@ -166,7 +151,7 @@ func (n *Node) LookupNode(ctx context.Context, path []byte, l Loader) (*Node, er
}
c := common(f.prefix, path)
if len(c) == len(f.prefix) {
- return f.Node.LookupNode(ctx, path[len(c):], l)
+ return f.LookupNode(ctx, path[len(c):], l)
}
return nil, notFound(path)
}
@@ -259,7 +244,7 @@ func (n *Node) Add(ctx context.Context, path, entry []byte, metadata map[string]
nn.SetObfuscationKey(n.obfuscationKey)
}
nn.refBytesSize = n.refBytesSize
- f.Node.updateIsWithPathSeparator(rest)
+ f.updateIsWithPathSeparator(rest)
nn.forks[rest[0]] = &fork{rest, f.Node}
nn.makeEdge()
// if common path is full path new node is value type
@@ -319,7 +304,7 @@ func (n *Node) Remove(ctx context.Context, path []byte, ls LoadSaver) error {
delete(n.forks, path[0])
return nil
}
- return f.Node.Remove(ctx, rest, ls)
+ return f.Remove(ctx, rest, ls)
}
func common(a, b []byte) (c []byte) {
@@ -350,7 +335,7 @@ func (n *Node) HasPrefix(ctx context.Context, path []byte, l Loader) (bool, erro
}
c := common(f.prefix, path)
if len(c) == len(f.prefix) {
- return f.Node.HasPrefix(ctx, path[len(c):], l)
+ return f.HasPrefix(ctx, path[len(c):], l)
}
if bytes.HasPrefix(f.prefix, path) {
return true, nil
diff --git a/pkg/manifest/mantaray/node_test.go b/pkg/manifest/mantaray/node_test.go
index b9a8da787d7..80b0ed4963b 100644
--- a/pkg/manifest/mantaray/node_test.go
+++ b/pkg/manifest/mantaray/node_test.go
@@ -41,14 +41,14 @@ func TestAddAndLookup(t *testing.T) {
[]byte("aa"),
[]byte("b"),
}
- for i := 0; i < len(testCases); i++ {
+ for i := range testCases {
c := testCases[i]
e := append(make([]byte, 32-len(c)), c...)
err := n.Add(ctx, c, e, nil, nil)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
- for j := 0; j < i; j++ {
+ for j := range i {
d := testCases[j]
m, err := n.Lookup(ctx, d, nil)
if err != nil {
diff --git a/pkg/manifest/mantaray/persist.go b/pkg/manifest/mantaray/persist.go
index 8ac812e50d8..259785d803c 100644
--- a/pkg/manifest/mantaray/persist.go
+++ b/pkg/manifest/mantaray/persist.go
@@ -71,7 +71,7 @@ func (n *Node) save(ctx context.Context, s Saver) error {
eg, ectx := errgroup.WithContext(ctx)
for _, f := range n.forks {
eg.Go(func() error {
- return f.Node.save(ectx, s)
+ return f.save(ectx, s)
})
}
if err := eg.Wait(); err != nil {
diff --git a/pkg/manifest/mantaray/persist_test.go b/pkg/manifest/mantaray/persist_test.go
index e0eb86e6b09..619989d3803 100644
--- a/pkg/manifest/mantaray/persist_test.go
+++ b/pkg/manifest/mantaray/persist_test.go
@@ -32,7 +32,7 @@ func TestPersistIdempotence(t *testing.T) {
}
ctx := context.Background()
var ls mantaray.LoadSaver = newMockLoadSaver()
- for i := 0; i < len(paths); i++ {
+ for i := range paths {
c := paths[i]
err := n.Save(ctx, ls)
if err != nil {
@@ -49,7 +49,7 @@ func TestPersistIdempotence(t *testing.T) {
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
- for i := 0; i < len(paths); i++ {
+ for i := range paths {
c := paths[i]
m, err := n.Lookup(ctx, c, ls)
if err != nil {
diff --git a/pkg/manifest/mantaray/walker.go b/pkg/manifest/mantaray/walker.go
index a46533a72da..1a27ed23b8b 100644
--- a/pkg/manifest/mantaray/walker.go
+++ b/pkg/manifest/mantaray/walker.go
@@ -6,7 +6,7 @@ package mantaray
import (
"context"
- "sort"
+ "slices"
)
// WalkNodeFunc is the type of the function called for each node visited
@@ -34,7 +34,7 @@ func walkNode(ctx context.Context, path []byte, l Loader, n *Node, walkFn WalkNo
for k := range n.forks {
keys = append(keys, k)
}
- sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+ slices.Sort(keys)
for _, k := range keys {
v := n.forks[k]
diff --git a/pkg/manifest/mantaray/walker_test.go b/pkg/manifest/mantaray/walker_test.go
index f6d72687a39..76c23f96e69 100644
--- a/pkg/manifest/mantaray/walker_test.go
+++ b/pkg/manifest/mantaray/walker_test.go
@@ -55,7 +55,7 @@ func TestWalkNode(t *testing.T) {
n := mantaray.New()
- for i := 0; i < len(toAdd); i++ {
+ for i := range toAdd {
c := toAdd[i]
e := append(make([]byte, 32-len(c)), c...)
err := n.Add(ctx, c, e, nil, nil)
diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go
index cbe2ae20c58..64ceef0c51e 100644
--- a/pkg/metrics/metrics.go
+++ b/pkg/metrics/metrics.go
@@ -18,7 +18,7 @@ type Collector interface {
Metrics() []prometheus.Collector
}
-func PrometheusCollectorsFromFields(i interface{}) (cs []prometheus.Collector) {
+func PrometheusCollectorsFromFields(i any) (cs []prometheus.Collector) {
v := reflect.Indirect(reflect.ValueOf(i))
for i := 0; i < v.NumField(); i++ {
if !v.Field(i).CanInterface() {
diff --git a/pkg/node/bootstrap.go b/pkg/node/bootstrap.go
deleted file mode 100644
index 2a6dc21661d..00000000000
--- a/pkg/node/bootstrap.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright 2022 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package node
-
-import (
- "context"
- "crypto/ecdsa"
- "encoding/hex"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math/big"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethersphere/bee/v2/pkg/accounting"
- "github.com/ethersphere/bee/v2/pkg/addressbook"
- "github.com/ethersphere/bee/v2/pkg/crypto"
- "github.com/ethersphere/bee/v2/pkg/feeds"
- "github.com/ethersphere/bee/v2/pkg/feeds/factory"
- "github.com/ethersphere/bee/v2/pkg/file"
- "github.com/ethersphere/bee/v2/pkg/file/joiner"
- "github.com/ethersphere/bee/v2/pkg/file/loadsave"
- "github.com/ethersphere/bee/v2/pkg/file/redundancy"
- "github.com/ethersphere/bee/v2/pkg/hive"
- "github.com/ethersphere/bee/v2/pkg/log"
- "github.com/ethersphere/bee/v2/pkg/manifest"
- "github.com/ethersphere/bee/v2/pkg/p2p/libp2p"
- "github.com/ethersphere/bee/v2/pkg/postage"
- "github.com/ethersphere/bee/v2/pkg/pricer"
- "github.com/ethersphere/bee/v2/pkg/pricing"
- "github.com/ethersphere/bee/v2/pkg/retrieval"
- "github.com/ethersphere/bee/v2/pkg/settlement/pseudosettle"
- "github.com/ethersphere/bee/v2/pkg/spinlock"
- "github.com/ethersphere/bee/v2/pkg/stabilization"
- "github.com/ethersphere/bee/v2/pkg/storage"
- "github.com/ethersphere/bee/v2/pkg/storer"
- "github.com/ethersphere/bee/v2/pkg/swarm"
- "github.com/ethersphere/bee/v2/pkg/topology"
- "github.com/ethersphere/bee/v2/pkg/topology/kademlia"
- "github.com/ethersphere/bee/v2/pkg/topology/lightnode"
- "github.com/ethersphere/bee/v2/pkg/tracing"
- "github.com/hashicorp/go-multierror"
- ma "github.com/multiformats/go-multiaddr"
-)
-
-var (
- // zeroed out while waiting to be replacement for the new snapshot feed address
- // must be different to avoid stale reads on the old contract
- snapshotFeed = swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000")
- errDataMismatch = errors.New("data length mismatch")
-)
-
-const (
- getSnapshotRetries = 3
- retryWait = time.Second * 5
- timeout = time.Minute * 2
-)
-
-func bootstrapNode(
- ctx context.Context,
- addr string,
- swarmAddress swarm.Address,
- nonce []byte,
- addressbook addressbook.Interface,
- bootnodes []ma.Multiaddr,
- lightNodes *lightnode.Container,
- stateStore storage.StateStorer,
- signer crypto.Signer,
- networkID uint64,
- logger log.Logger,
- libp2pPrivateKey *ecdsa.PrivateKey,
- detector *stabilization.Detector,
- o *Options,
-) (snapshot *postage.ChainSnapshot, retErr error) {
- tracer, tracerCloser, err := tracing.NewTracer(&tracing.Options{
- Enabled: o.TracingEnabled,
- Endpoint: o.TracingEndpoint,
- ServiceName: o.TracingServiceName,
- })
- if err != nil {
- return nil, fmt.Errorf("tracer: %w", err)
- }
-
- p2pCtx, p2pCancel := context.WithCancel(ctx)
-
- b := &Bee{
- ctxCancel: p2pCancel,
- tracerCloser: tracerCloser,
- }
-
- defer func() {
- retErr = multierror.Append(new(multierror.Error), retErr, b.Shutdown()).ErrorOrNil()
- }()
-
- p2ps, err := libp2p.New(p2pCtx, signer, networkID, swarmAddress, addr, addressbook, stateStore, lightNodes, logger, tracer, libp2p.Options{
- PrivateKey: libp2pPrivateKey,
- NATAddr: o.NATAddr,
- EnableWS: o.EnableWS,
- WelcomeMessage: o.WelcomeMessage,
- FullNode: false,
- Nonce: nonce,
- })
- if err != nil {
- return nil, fmt.Errorf("p2p service: %w", err)
- }
- b.p2pService = p2ps
- b.p2pHalter = p2ps
-
- hive := hive.New(p2ps, addressbook, networkID, o.BootnodeMode, o.AllowPrivateCIDRs, logger)
-
- if err = p2ps.AddProtocol(hive.Protocol()); err != nil {
- return nil, fmt.Errorf("hive service: %w", err)
- }
- b.hiveCloser = hive
-
- kad, err := kademlia.New(swarmAddress, addressbook, hive, p2ps, detector, logger,
- kademlia.Options{Bootnodes: bootnodes, BootnodeMode: o.BootnodeMode, StaticNodes: o.StaticNodes, DataDir: o.DataDir})
- if err != nil {
- return nil, fmt.Errorf("unable to create kademlia: %w", err)
- }
- b.topologyCloser = kad
- b.topologyHalter = kad
- hive.SetAddPeersHandler(kad.AddPeers)
- p2ps.SetPickyNotifier(kad)
-
- paymentThreshold, _ := new(big.Int).SetString(o.PaymentThreshold, 10)
- lightPaymentThreshold := new(big.Int).Div(paymentThreshold, big.NewInt(lightFactor))
-
- pricer := pricer.NewFixedPricer(swarmAddress, basePrice)
-
- pricing := pricing.New(p2ps, logger, paymentThreshold, lightPaymentThreshold, big.NewInt(minPaymentThreshold))
- if err = p2ps.AddProtocol(pricing.Protocol()); err != nil {
- return nil, fmt.Errorf("pricing service: %w", err)
- }
-
- acc, err := accounting.NewAccounting(
- paymentThreshold,
- o.PaymentTolerance,
- o.PaymentEarly,
- logger,
- stateStore,
- pricing,
- big.NewInt(refreshRate),
- lightFactor,
- p2ps,
- )
- if err != nil {
- return nil, fmt.Errorf("accounting: %w", err)
- }
- b.accountingCloser = acc
-
- // bootstrapper mode uses the light node refresh rate
- enforcedRefreshRate := big.NewInt(lightRefreshRate)
-
- pseudosettleService := pseudosettle.New(p2ps, logger, stateStore, acc, enforcedRefreshRate, enforcedRefreshRate, p2ps)
- if err = p2ps.AddProtocol(pseudosettleService.Protocol()); err != nil {
- return nil, fmt.Errorf("pseudosettle service: %w", err)
- }
-
- acc.SetRefreshFunc(pseudosettleService.Pay)
-
- pricing.SetPaymentThresholdObserver(acc)
-
- localStore, err := storer.New(ctx, "", &storer.Options{
- CacheCapacity: 1_000_000,
- })
- if err != nil {
- return nil, fmt.Errorf("local store creation: %w", err)
- }
- b.localstoreCloser = localStore
-
- radiusF := func() (uint8, error) { return swarm.MaxBins, nil }
-
- retrieve := retrieval.New(swarmAddress, radiusF, localStore, p2ps, kad, logger, acc, pricer, tracer, o.RetrievalCaching)
- if err = p2ps.AddProtocol(retrieve.Protocol()); err != nil {
- return nil, fmt.Errorf("retrieval service: %w", err)
- }
- b.retrievalCloser = retrieve
-
- localStore.SetRetrievalService(retrieve)
-
- if err := kad.Start(p2pCtx); err != nil {
- return nil, err
- }
-
- if err := p2ps.Ready(); err != nil {
- return nil, err
- }
-
- if err := waitPeers(kad); err != nil {
- return nil, errors.New("timed out waiting for kademlia peers")
- }
-
- logger.Info("bootstrap: trying to fetch stamps snapshot")
-
- var (
- snapshotRootCh swarm.Chunk
- reader file.Joiner
- l int64
- eventsJSON []byte
- )
-
- for i := 0; i < getSnapshotRetries; i++ {
- if err != nil {
- time.Sleep(retryWait)
- }
-
- ctx, cancel := context.WithTimeout(ctx, timeout)
- defer cancel()
-
- snapshotRootCh, err = getLatestSnapshot(ctx, localStore.Download(true), localStore.Cache(), snapshotFeed)
- if err != nil {
- logger.Warning("bootstrap: fetching snapshot failed", "error", err)
- continue
- }
- break
- }
- if err != nil {
- return nil, err
- }
-
- for i := 0; i < getSnapshotRetries; i++ {
- if err != nil {
- time.Sleep(retryWait)
- }
-
- ctx, cancel := context.WithTimeout(ctx, timeout)
- defer cancel()
-
- reader, l, err = joiner.NewJoiner(ctx, localStore.Download(true), localStore.Cache(), snapshotRootCh.Address(), snapshotRootCh)
- if err != nil {
- logger.Warning("bootstrap: file joiner failed", "error", err)
- continue
- }
-
- eventsJSON, err = io.ReadAll(reader)
- if err != nil {
- logger.Warning("bootstrap: reading failed", "error", err)
- continue
- }
-
- if len(eventsJSON) != int(l) {
- err = errDataMismatch
- logger.Warning("bootstrap: count mismatch", "error", err)
- continue
- }
- break
- }
- if err != nil {
- return nil, err
- }
-
- events := postage.ChainSnapshot{}
- err = json.Unmarshal(eventsJSON, &events)
- if err != nil {
- return nil, err
- }
-
- return &events, nil
-}
-
-// wait till some peers are connected. returns true if all is ok
-func waitPeers(kad *kademlia.Kad) error {
- const minPeersCount = 25
- return spinlock.WaitWithInterval(time.Minute, time.Second, func() bool {
- count := 0
- _ = kad.EachConnectedPeer(func(_ swarm.Address, _ uint8) (bool, bool, error) {
- count++
- return false, false, nil
- }, topology.Select{})
- return count >= minPeersCount
- })
-}
-
-func getLatestSnapshot(
- ctx context.Context,
- st storage.Getter,
- putter storage.Putter,
- address swarm.Address,
-) (swarm.Chunk, error) {
- ls := loadsave.NewReadonly(st, putter, redundancy.DefaultLevel)
- feedFactory := factory.New(st)
-
- m, err := manifest.NewDefaultManifestReference(
- address,
- ls,
- )
- if err != nil {
- return nil, fmt.Errorf("not a manifest: %w", err)
- }
-
- e, err := m.Lookup(ctx, "/")
- if err != nil {
- return nil, fmt.Errorf("node lookup: %w", err)
- }
-
- var (
- owner, topic []byte
- t = new(feeds.Type)
- )
- meta := e.Metadata()
- if e := meta["swarm-feed-owner"]; e != "" {
- owner, err = hex.DecodeString(e)
- if err != nil {
- return nil, err
- }
- }
- if e := meta["swarm-feed-topic"]; e != "" {
- topic, err = hex.DecodeString(e)
- if err != nil {
- return nil, err
- }
- }
- if e := meta["swarm-feed-type"]; e != "" {
- err := t.FromString(e)
- if err != nil {
- return nil, err
- }
- }
- if len(owner) == 0 || len(topic) == 0 {
- return nil, fmt.Errorf("node lookup: %s", "feed metadata absent")
- }
- f := feeds.New(topic, common.BytesToAddress(owner))
-
- l, err := feedFactory.NewLookup(*t, f)
- if err != nil {
- return nil, fmt.Errorf("feed lookup failed: %w", err)
- }
-
- u, _, _, err := l.At(ctx, time.Now().Unix(), 0)
- if err != nil {
- return nil, err
- }
-
- return feeds.GetWrappedChunk(ctx, st, u)
-}
-
-func batchStoreExists(s storage.StateStorer) (bool, error) {
- hasOne := false
- err := s.Iterate("batchstore_", func(key, value []byte) (stop bool, err error) {
- hasOne = true
- return true, err
- })
-
- return hasOne, err
-}
diff --git a/pkg/node/chain.go b/pkg/node/chain.go
index 5931c8dde8e..78fc0a5448d 100644
--- a/pkg/node/chain.go
+++ b/pkg/node/chain.go
@@ -13,9 +13,7 @@ import (
"strings"
"time"
- "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethersphere/bee/v2/pkg/config"
@@ -31,9 +29,8 @@ import (
"github.com/ethersphere/bee/v2/pkg/settlement/swap/swapprotocol"
"github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/transaction"
+ "github.com/ethersphere/bee/v2/pkg/transaction/backendnoop"
"github.com/ethersphere/bee/v2/pkg/transaction/wrapped"
- "github.com/ethersphere/go-sw3-abi/sw3abi"
- "github.com/prometheus/client_golang/prometheus"
)
const (
@@ -49,37 +46,42 @@ func InitChain(
logger log.Logger,
stateStore storage.StateStorer,
endpoint string,
- oChainID int64,
+ chainID int64,
signer crypto.Signer,
pollingInterval time.Duration,
chainEnabled bool,
+ minimumGasTipCap uint64,
) (transaction.Backend, common.Address, int64, transaction.Monitor, transaction.Service, error) {
- var backend transaction.Backend = &noOpChainBackend{
- chainID: oChainID,
- }
+ backend := backendnoop.New(chainID)
if chainEnabled {
- // connect to the real one
rpcClient, err := rpc.DialContext(ctx, endpoint)
if err != nil {
return nil, common.Address{}, 0, nil, nil, fmt.Errorf("dial blockchain client: %w", err)
}
var versionString string
- err = rpcClient.CallContext(ctx, &versionString, "web3_clientVersion")
- if err != nil {
- logger.Info("could not connect to backend; in a swap-enabled network a working blockchain node (for xdai network in production, sepolia in testnet) is required; check your node or specify another node using --blockchain-rpc-endpoint.", "backend_endpoint", endpoint)
- return nil, common.Address{}, 0, nil, nil, fmt.Errorf("blockchain client get version: %w", err)
+ if err = rpcClient.CallContext(ctx, &versionString, "web3_clientVersion"); err != nil {
+ logger.Info("could not connect to backend; "+
+ "in a swap-enabled network a working blockchain node "+
+ "(for xDAI network in production, SepoliaETH in testnet) is required; "+
+ "check your node or specify another node using --blockchain-rpc-endpoint.",
+ "blockchain-rpc-endpoint", endpoint)
+ return nil, common.Address{}, 0, nil, nil, fmt.Errorf("get client version: %w", err)
}
logger.Info("connected to blockchain backend", "version", versionString)
- backend = wrapped.NewBackend(ethclient.NewClient(rpcClient))
+ backend = wrapped.NewBackend(ethclient.NewClient(rpcClient), minimumGasTipCap)
}
- chainID, err := backend.ChainID(ctx)
+ backendChainID, err := backend.ChainID(ctx)
if err != nil {
- return nil, common.Address{}, 0, nil, nil, fmt.Errorf("get chain id: %w", err)
+ return nil, common.Address{}, 0, nil, nil, fmt.Errorf("getting chain id: %w", err)
+ }
+
+ if chainID != -1 && chainID != backendChainID.Int64() {
+ return nil, common.Address{}, 0, nil, nil, fmt.Errorf("connected to wrong network: expected chain id %d, got %d", chainID, backendChainID.Int64())
}
overlayEthAddress, err := signer.EthereumAddress()
@@ -89,18 +91,19 @@ func InitChain(
transactionMonitor := transaction.NewMonitor(logger, backend, overlayEthAddress, pollingInterval, cancellationDepth)
- transactionService, err := transaction.NewService(logger, overlayEthAddress, backend, signer, stateStore, chainID, transactionMonitor)
+ transactionService, err := transaction.NewService(logger, overlayEthAddress, backend, signer, stateStore, backendChainID, transactionMonitor)
if err != nil {
- return nil, common.Address{}, 0, nil, nil, fmt.Errorf("new transaction service: %w", err)
+ return nil, common.Address{}, 0, nil, nil, fmt.Errorf("transaction service: %w", err)
}
- return backend, overlayEthAddress, chainID.Int64(), transactionMonitor, transactionService, nil
+ return backend, overlayEthAddress, backendChainID.Int64(), transactionMonitor, transactionService, nil
}
// InitChequebookFactory will initialize the chequebook factory with the given
// chain backend.
func InitChequebookFactory(logger log.Logger, backend transaction.Backend, chainID int64, transactionService transaction.Service, factoryAddress string) (chequebook.Factory, error) {
var currentFactory common.Address
+
chainCfg, found := config.GetByChainID(chainID)
foundFactory := chainCfg.CurrentFactoryAddress
@@ -340,85 +343,3 @@ func (m *noOpChequebookService) LastCheque(common.Address) (*chequebook.SignedCh
func (m *noOpChequebookService) LastCheques() (map[common.Address]*chequebook.SignedCheque, error) {
return nil, postagecontract.ErrChainDisabled
}
-
-// noOpChainBackend is a noOp implementation for transaction.Backend interface.
-type noOpChainBackend struct {
- chainID int64
-}
-
-// BlockByNumber implements transaction.Backend.
-func (m *noOpChainBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-func (m noOpChainBackend) Metrics() []prometheus.Collector {
- return nil
-}
-
-func (m noOpChainBackend) CodeAt(context.Context, common.Address, *big.Int) ([]byte, error) {
- return common.FromHex(sw3abi.SimpleSwapFactoryDeployedBinv0_6_9), nil
-}
-
-func (m noOpChainBackend) CallContract(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error) {
- return nil, errors.New("disabled chain backend")
-}
-
-func (m noOpChainBackend) HeaderByNumber(context.Context, *big.Int) (*types.Header, error) {
- h := new(types.Header)
- h.Time = uint64(time.Now().Unix())
- return h, nil
-}
-
-func (m noOpChainBackend) PendingNonceAt(context.Context, common.Address) (uint64, error) {
- panic("chain no op: PendingNonceAt")
-}
-
-func (m noOpChainBackend) SuggestGasPrice(context.Context) (*big.Int, error) {
- panic("chain no op: SuggestGasPrice")
-}
-
-func (m noOpChainBackend) SuggestGasTipCap(context.Context) (*big.Int, error) {
- panic("chain no op: SuggestGasPrice")
-}
-
-func (m noOpChainBackend) EstimateGas(context.Context, ethereum.CallMsg) (uint64, error) {
- panic("chain no op: EstimateGas")
-}
-
-func (m noOpChainBackend) SendTransaction(context.Context, *types.Transaction) error {
- panic("chain no op: SendTransaction")
-}
-
-func (m noOpChainBackend) TransactionReceipt(context.Context, common.Hash) (*types.Receipt, error) {
- r := new(types.Receipt)
- r.BlockNumber = big.NewInt(1)
- return r, nil
-}
-
-func (m noOpChainBackend) TransactionByHash(context.Context, common.Hash) (tx *types.Transaction, isPending bool, err error) {
- panic("chain no op: TransactionByHash")
-}
-
-func (m noOpChainBackend) BlockNumber(context.Context) (uint64, error) {
- return 4, nil
-}
-
-func (m noOpChainBackend) BalanceAt(context.Context, common.Address, *big.Int) (*big.Int, error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-func (m noOpChainBackend) NonceAt(context.Context, common.Address, *big.Int) (uint64, error) {
- panic("chain no op: NonceAt")
-}
-
-func (m noOpChainBackend) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
- panic("chain no op: FilterLogs")
-}
-
-func (m noOpChainBackend) ChainID(context.Context) (*big.Int, error) {
- return big.NewInt(m.chainID), nil
-}
-
-func (m noOpChainBackend) Close() error {
- return nil
-}
diff --git a/pkg/node/devnode.go b/pkg/node/devnode.go
index 9ff5363c455..b3b7e1e8677 100644
--- a/pkg/node/devnode.go
+++ b/pkg/node/devnode.go
@@ -244,7 +244,7 @@ func NewDevBee(logger log.Logger, o *DevOptions) (b *DevBee, err error) {
lightNodes = lightnode.NewContainer(swarm.NewAddress(nil))
pingPong = mockPingPong.New(pong)
p2ps = mockP2P.New(
- mockP2P.WithConnectFunc(func(ctx context.Context, addr multiaddr.Multiaddr) (address *bzz.Address, err error) {
+ mockP2P.WithConnectFunc(func(ctx context.Context, addr []multiaddr.Multiaddr) (address *bzz.Address, err error) {
return &bzz.Address{}, nil
}), mockP2P.WithDisconnectFunc(
func(swarm.Address, string) error {
@@ -373,7 +373,7 @@ func NewDevBee(logger log.Logger, o *DevOptions) (b *DevBee, err error) {
apiService.SetP2P(p2ps)
apiService.SetSwarmAddress(&swarmAddress)
- apiListener, err := net.Listen("tcp", o.APIAddr)
+ apiListener, err := (&net.ListenConfig{}).Listen(context.Background(), "tcp", o.APIAddr)
if err != nil {
return nil, fmt.Errorf("api listener: %w", err)
}
diff --git a/pkg/node/export_test.go b/pkg/node/export_test.go
new file mode 100644
index 00000000000..3a142d94f7f
--- /dev/null
+++ b/pkg/node/export_test.go
@@ -0,0 +1,7 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package node
+
+var ValidatePublicAddress = validatePublicAddress
diff --git a/pkg/node/metrics.go b/pkg/node/metrics.go
new file mode 100644
index 00000000000..a161c39ffe7
--- /dev/null
+++ b/pkg/node/metrics.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package node
+
+import (
+ "github.com/ethersphere/bee/v2/pkg/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type nodeMetrics struct {
+ // WarmupDuration measures time in seconds for the node warmup to complete
+ WarmupDuration prometheus.Histogram
+ // FullSyncDuration measures time in seconds for the full sync to complete
+ FullSyncDuration prometheus.Histogram
+}
+
+func newMetrics() nodeMetrics {
+ subsystem := "init"
+
+ return nodeMetrics{
+ WarmupDuration: prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metrics.Namespace,
+ Subsystem: subsystem,
+ Name: "warmup_duration_seconds",
+ Help: "Duration in seconds for node warmup to complete",
+ // middle range should be more infrequent (because of addressbook)
+ Buckets: []float64{10, 20, 25, 30, 35, 40, 45, 50, 60, 70, 90, 120, 180, 240, 300, 350, 380, 400, 420, 440, 460, 480, 550, 600},
+ },
+ ),
+ FullSyncDuration: prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metrics.Namespace,
+ Subsystem: subsystem,
+ Name: "full_sync_duration_minutes",
+ Help: "Duration in minutes for node full sync to complete",
+ // middle range should be more frequent
+ Buckets: []float64{80, 90, 100, 110,
+ 120, 125, 130, 135, 140, 145, 150, 155, 160, 165, 170, 175, 180, // 2-3 hours range
+ 190, 200, 210, 220, 230, 240},
+ },
+ ),
+ }
+}
+
+func getMetrics(nodeMetrics nodeMetrics) []prometheus.Collector {
+ return metrics.PrometheusCollectorsFromFields(nodeMetrics)
+}
diff --git a/pkg/node/node.go b/pkg/node/node.go
index 7d14ceb1b15..7564d894f58 100644
--- a/pkg/node/node.go
+++ b/pkg/node/node.go
@@ -20,6 +20,7 @@ import (
"net/http"
"path/filepath"
"runtime"
+ "strconv"
"sync"
"sync/atomic"
"time"
@@ -62,6 +63,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/stabilization"
"github.com/ethersphere/bee/v2/pkg/status"
"github.com/ethersphere/bee/v2/pkg/steward"
+ "github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/storageincentives"
"github.com/ethersphere/bee/v2/pkg/storageincentives/redistribution"
"github.com/ethersphere/bee/v2/pkg/storageincentives/staking"
@@ -80,6 +82,7 @@ import (
ma "github.com/multiformats/go-multiaddr"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/sha3"
+ "golang.org/x/net/idna"
"golang.org/x/sync/errgroup"
)
@@ -106,7 +109,6 @@ type Bee struct {
pullSyncCloser io.Closer
pssCloser io.Closer
gsocCloser io.Closer
- ethClientCloser io.Closer
transactionMonitorCloser io.Closer
transactionCloser io.Closer
listenerCloser io.Closer
@@ -116,23 +118,27 @@ type Bee struct {
saludCloser io.Closer
storageIncetivesCloser io.Closer
pushSyncCloser io.Closer
- retrievalCloser io.Closer
shutdownInProgress bool
shutdownMutex sync.Mutex
syncingStopped *syncutil.Signaler
accesscontrolCloser io.Closer
+ ethClientCloser func()
}
type Options struct {
Addr string
AllowPrivateCIDRs bool
APIAddr string
+ EnableWSS bool
+ WSSAddr string
+ AutoTLSStorageDir string
BlockchainRpcEndpoint string
BlockProfile bool
BlockTime time.Duration
BootnodeMode bool
Bootnodes []string
CacheCapacity uint64
+ AutoTLSCAEndpoint string
ChainID int64
ChequebookEnable bool
CORSAllowedOrigins []string
@@ -143,11 +149,15 @@ type Options struct {
DBWriteBufferSize uint64
EnableStorageIncentives bool
EnableWS bool
+ AutoTLSDomain string
+ AutoTLSRegistrationEndpoint string
FullNodeMode bool
Logger log.Logger
+ MinimumGasTipCap uint64
MinimumStorageRadius uint
MutexProfile bool
NATAddr string
+ NATWSSAddr string
NeighborhoodSuggester string
PaymentEarly int64
PaymentThreshold string
@@ -172,7 +182,6 @@ type Options struct {
TracingEndpoint string
TracingServiceName string
TrxDebugMode bool
- UsePostageSnapshot bool
WarmupTime time.Duration
WelcomeMessage string
WhitelistedWithdrawalAddress []string
@@ -206,6 +215,12 @@ func NewBee(
session accesscontrol.Session,
o *Options,
) (b *Bee, err error) {
+ // start time for node warmup duration measurement
+ warmupStartTime := time.Now()
+ var pullSyncStartTime time.Time
+
+ nodeMetrics := newMetrics()
+
tracer, tracerCloser, err := tracing.NewTracer(&tracing.Options{
Enabled: o.TracingEnabled,
Endpoint: o.TracingEndpoint,
@@ -215,6 +230,14 @@ func NewBee(
return nil, fmt.Errorf("tracer: %w", err)
}
+ if err := validatePublicAddress(o.NATAddr); err != nil {
+ return nil, fmt.Errorf("invalid NAT address %s: %w", o.NATAddr, err)
+ }
+
+ if err := validatePublicAddress(o.NATWSSAddr); err != nil {
+ return nil, fmt.Errorf("invalid NAT WSS address %s: %w", o.NATWSSAddr, err)
+ }
+
ctx, ctxCancel := context.WithCancel(ctx)
defer func() {
// if there's been an error on this function
@@ -351,16 +374,10 @@ func NewBee(
}
var (
- chainBackend transaction.Backend
- overlayEthAddress common.Address
- chainID int64
- transactionService transaction.Service
- transactionMonitor transaction.Monitor
- chequebookFactory chequebook.Factory
- chequebookService chequebook.Service = new(noOpChequebookService)
- chequeStore chequebook.ChequeStore
- cashoutService chequebook.CashoutService
- erc20Service erc20.Service
+ chequebookService chequebook.Service = new(noOpChequebookService)
+ chequeStore chequebook.ChequeStore
+ cashoutService chequebook.CashoutService
+ erc20Service erc20.Service
)
chainEnabled := isChainEnabled(o, o.BlockchainRpcEndpoint, logger)
@@ -382,7 +399,7 @@ func NewBee(
}
}
- chainBackend, overlayEthAddress, chainID, transactionMonitor, transactionService, err = InitChain(
+ chainBackend, overlayEthAddress, chainID, transactionMonitor, transactionService, err := InitChain(
ctx,
logger,
stateStore,
@@ -390,18 +407,16 @@ func NewBee(
o.ChainID,
signer,
o.BlockTime,
- chainEnabled)
+ chainEnabled,
+ o.MinimumGasTipCap,
+ )
if err != nil {
return nil, fmt.Errorf("init chain: %w", err)
}
- b.ethClientCloser = chainBackend
- logger.Info("using chain with network network", "chain_id", chainID, "network_id", networkID)
-
- if o.ChainID != -1 && o.ChainID != chainID {
- return nil, fmt.Errorf("connected to wrong blockchain network; network chainID %d; configured chainID %d", chainID, o.ChainID)
- }
+ logger.Info("using chain with network", "chain_id", chainID, "network_id", networkID)
+ b.ethClientCloser = chainBackend.Close
b.transactionCloser = tracerCloser
b.transactionMonitorCloser = transactionMonitor
@@ -439,7 +454,7 @@ func NewBee(
runtime.SetBlockProfileRate(1)
}
- apiListener, err := net.Listen("tcp", o.APIAddr)
+ apiListener, err := (&net.ListenConfig{}).Listen(ctx, "tcp", o.APIAddr)
if err != nil {
return nil, fmt.Errorf("api listener: %w", err)
}
@@ -485,22 +500,24 @@ func NewBee(
b.apiCloser = apiServer
}
- // Sync the with the given Ethereum backend:
- isSynced, _, err := transaction.IsSynced(ctx, chainBackend, maxDelay)
- if err != nil {
- return nil, fmt.Errorf("is synced: %w", err)
- }
- if !isSynced {
- logger.Info("waiting to sync with the blockchain backend")
-
- err := transaction.WaitSynced(ctx, logger, chainBackend, maxDelay)
+ if chainEnabled {
+ // Sync the with the given Ethereum backend:
+ isSynced, _, err := transaction.IsSynced(ctx, chainBackend, maxDelay)
if err != nil {
- return nil, fmt.Errorf("waiting backend sync: %w", err)
+ return nil, fmt.Errorf("is synced: %w", err)
+ }
+ if !isSynced {
+ logger.Info("waiting to sync with the blockchain backend")
+
+ err := transaction.WaitSynced(ctx, logger, chainBackend, maxDelay)
+ if err != nil {
+ return nil, fmt.Errorf("waiting backend sync: %w", err)
+ }
}
}
if o.SwapEnable {
- chequebookFactory, err = InitChequebookFactory(logger, chainBackend, chainID, transactionService, o.SwapFactoryAddress)
+ chequebookFactory, err := InitChequebookFactory(logger, chainBackend, chainID, transactionService, o.SwapFactoryAddress)
if err != nil {
return nil, fmt.Errorf("init chequebook factory: %w", err)
}
@@ -595,42 +612,22 @@ func NewBee(
logger.Info("node warmup check initiated. monitoring activity rate to determine readiness.", "startTime", t)
}
- detector.OnStabilized = func(t time.Time, totalCount int) {
- logger.Info("node warmup complete. system is considered stable and ready.", "stabilizationTime", t, "totalMonitoredEvents", totalCount)
+ warmupMeasurement := func(t time.Time, totalCount int) {
+ warmupDuration := t.Sub(warmupStartTime).Seconds()
+ logger.Info("node warmup complete. system is considered stable and ready.",
+ "stabilizationTime", t,
+ "totalMonitoredEvents", totalCount,
+ "warmupDurationSeconds", warmupDuration)
+
+ nodeMetrics.WarmupDuration.Observe(warmupDuration)
+ pullSyncStartTime = t
}
+ detector.OnStabilized = warmupMeasurement
detector.OnPeriodComplete = func(t time.Time, periodCount int, stDev float64) {
logger.Debug("node warmup check: period complete.", "periodEndTime", t, "eventsInPeriod", periodCount, "rateStdDev", stDev)
}
- var initBatchState *postage.ChainSnapshot
- // Bootstrap node with postage snapshot only if it is running on mainnet, is a fresh
- // install or explicitly asked by user to resync
- if networkID == mainnetNetworkID && o.UsePostageSnapshot && (!batchStoreExists || o.Resync) {
- start := time.Now()
- logger.Info("cold postage start detected. fetching postage stamp snapshot from swarm")
- initBatchState, err = bootstrapNode(
- ctx,
- addr,
- swarmAddress,
- nonce,
- addressbook,
- bootnodes,
- lightNodes,
- stateStore,
- signer,
- networkID,
- log.Noop,
- libp2pPrivateKey,
- detector,
- o,
- )
- logger.Info("bootstrapper created", "elapsed", time.Since(start))
- if err != nil {
- logger.Error(err, "bootstrapper failed to fetch batch state")
- }
- }
-
var registry *prometheus.Registry
if apiService != nil {
@@ -638,14 +635,21 @@ func NewBee(
}
p2ps, err := libp2p.New(ctx, signer, networkID, swarmAddress, addr, addressbook, stateStore, lightNodes, logger, tracer, libp2p.Options{
- PrivateKey: libp2pPrivateKey,
- NATAddr: o.NATAddr,
- EnableWS: o.EnableWS,
- WelcomeMessage: o.WelcomeMessage,
- FullNode: o.FullNodeMode,
- Nonce: nonce,
- ValidateOverlay: chainEnabled,
- Registry: registry,
+ PrivateKey: libp2pPrivateKey,
+ NATAddr: o.NATAddr,
+ NATWSSAddr: o.NATWSSAddr,
+ EnableWS: o.EnableWS,
+ EnableWSS: o.EnableWSS,
+ WSSAddr: o.WSSAddr,
+ AutoTLSStorageDir: o.AutoTLSStorageDir,
+ AutoTLSDomain: o.AutoTLSDomain,
+ AutoTLSRegistrationEndpoint: o.AutoTLSRegistrationEndpoint,
+ AutoTLSCAEndpoint: o.AutoTLSCAEndpoint,
+ WelcomeMessage: o.WelcomeMessage,
+ FullNode: o.FullNodeMode,
+ Nonce: nonce,
+ ValidateOverlay: chainEnabled,
+ Registry: registry,
})
if err != nil {
return nil, fmt.Errorf("p2p service: %w", err)
@@ -718,7 +722,7 @@ func NewBee(
return nil, fmt.Errorf("pingpong service: %w", err)
}
- hive := hive.New(p2ps, addressbook, networkID, o.BootnodeMode, o.AllowPrivateCIDRs, logger)
+ hive := hive.New(p2ps, addressbook, networkID, o.BootnodeMode, o.AllowPrivateCIDRs, swarmAddress, logger)
if err = p2ps.AddProtocol(hive.Protocol()); err != nil {
return nil, fmt.Errorf("hive service: %w", err)
@@ -803,7 +807,7 @@ func NewBee(
}
)
- if !o.SkipPostageSnapshot && !batchStoreExists && (networkID == mainnetNetworkID) {
+ if !o.SkipPostageSnapshot && !batchStoreExists && (networkID == mainnetNetworkID) && beeNodeMode != api.UltraLightMode {
chainBackend := NewSnapshotLogFilterer(logger, archiveSnapshotGetter{})
snapshotEventListener := listener.New(b.syncingStopped, logger, chainBackend, postageStampContractAddress, postageStampContractABI, o.BlockTime, postageSyncingStallingTimeout, postageSyncingBackoffTimeout)
@@ -812,7 +816,7 @@ func NewBee(
if err != nil {
logger.Error(err, "failed to initialize batch service from snapshot, continuing outside snapshot block...")
} else {
- err = snapshotBatchSvc.Start(ctx, postageSyncStart, initBatchState)
+ err = snapshotBatchSvc.Start(ctx, postageSyncStart)
syncStatus.Store(true)
if err != nil {
syncErr.Store(err)
@@ -840,7 +844,7 @@ func NewBee(
}
if o.FullNodeMode {
- err = batchSvc.Start(ctx, postageSyncStart, initBatchState)
+ err = batchSvc.Start(ctx, postageSyncStart)
syncStatus.Store(true)
if err != nil {
syncErr.Store(err)
@@ -849,7 +853,7 @@ func NewBee(
} else {
go func() {
logger.Info("started postage contract data sync in the background...")
- err := batchSvc.Start(ctx, postageSyncStart, initBatchState)
+ err := batchSvc.Start(ctx, postageSyncStart)
syncStatus.Store(true)
if err != nil {
syncErr.Store(err)
@@ -975,7 +979,7 @@ func NewBee(
return nil, fmt.Errorf("status service: %w", err)
}
- saludService := salud.New(nodeStatus, kad, localStore, logger, detector, api.FullMode.String(), salud.DefaultMinPeersPerBin, salud.DefaultDurPercentile, salud.DefaultConnsPercentile)
+ saludService := salud.New(nodeStatus, kad, localStore, logger, detector, api.FullMode.String(), salud.DefaultDurPercentile, salud.DefaultConnsPercentile)
b.saludCloser = saludService
rC, unsub := saludService.SubscribeNetworkStorageRadius()
@@ -1130,6 +1134,37 @@ func NewBee(
localStore.StartReserveWorker(ctx, pullerService, waitNetworkRFunc)
nodeStatus.SetSync(pullerService)
+ // measure full sync duration
+ detector.OnStabilized = func(t time.Time, totalCount int) {
+ warmupMeasurement(t, totalCount)
+
+ reserveThreshold := reserveCapacity >> 1
+ isFullySynced := func() bool {
+ return pullerService.SyncRate() == 0 && saludService.IsHealthy() && localStore.ReserveSize() >= reserveThreshold
+ }
+
+ syncCheckTicker := time.NewTicker(2 * time.Second)
+ go func() {
+ defer syncCheckTicker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-syncCheckTicker.C:
+ synced := isFullySynced()
+ logger.Debug("sync status check", "synced", synced, "reserveSize", localStore.ReserveSize(), "syncRate", pullerService.SyncRate())
+ if synced {
+ fullSyncTime := time.Since(pullSyncStartTime)
+ logger.Info("full sync done", "duration", fullSyncTime)
+ nodeMetrics.FullSyncDuration.Observe(fullSyncTime.Minutes())
+ syncCheckTicker.Stop()
+ return
+ }
+ }
+ }
+ }()
+ }
+
if o.EnableStorageIncentives {
redistributionContractAddress := chainCfg.RedistributionAddress
@@ -1143,9 +1178,9 @@ func NewBee(
redistributionContract := redistribution.New(swarmAddress, overlayEthAddress, logger, transactionService, redistributionContractAddress, abiutil.MustParseABI(chainCfg.RedistributionABI), o.TrxDebugMode)
isFullySynced := func() bool {
- reserveTreshold := reserveCapacity * 5 / 10
+ reserveThreshold := reserveCapacity * 5 / 10
logger.Debug("Sync status check evaluated", "stabilized", detector.IsStabilized())
- return localStore.ReserveSize() >= reserveTreshold && pullerService.SyncRate() == 0 && detector.IsStabilized()
+ return localStore.ReserveSize() >= reserveThreshold && pullerService.SyncRate() == 0 && detector.IsStabilized()
}
agent, err = storageincentives.New(
@@ -1217,6 +1252,7 @@ func NewBee(
apiService.MustRegisterMetrics(kad.Metrics()...)
apiService.MustRegisterMetrics(saludService.Metrics()...)
apiService.MustRegisterMetrics(stateStoreMetrics.Metrics()...)
+ apiService.MustRegisterMetrics(getMetrics(nodeMetrics)...)
if pullerService != nil {
apiService.MustRegisterMetrics(pullerService.Metrics()...)
@@ -1384,7 +1420,10 @@ func (b *Bee) Shutdown() error {
wg.Wait()
- tryClose(b.ethClientCloser, "eth client")
+ if b.ethClientCloser != nil {
+ b.ethClientCloser()
+ }
+
tryClose(b.accesscontrolCloser, "accesscontrol")
tryClose(b.tracerCloser, "tracer")
tryClose(b.topologyCloser, "topology driver")
@@ -1403,11 +1442,71 @@ func isChainEnabled(o *Options, swapEndpoint string, logger log.Logger) bool {
chainDisabled := swapEndpoint == ""
lightMode := !o.FullNodeMode
- if lightMode && chainDisabled { // ultra light mode is LightNode mode with chain disabled
- logger.Info("starting with a disabled chain backend")
+ if lightMode && chainDisabled {
+ logger.Info("chain backend disabled - starting in ultra-light mode",
+ "full_node_mode", o.FullNodeMode,
+ "blockchain-rpc-endpoint", swapEndpoint)
return false
}
- logger.Info("starting with an enabled chain backend")
+ logger.Info("chain backend enabled - blockchain functionality available",
+ "full_node_mode", o.FullNodeMode,
+ "blockchain-rpc-endpoint", swapEndpoint)
return true // all other modes operate require chain enabled
}
+
+func validatePublicAddress(addr string) error {
+ if addr == "" {
+ return nil
+ }
+
+ host, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ return fmt.Errorf("invalid host:port address: %w", err)
+ }
+ if host == "" {
+ return errors.New("host is empty")
+ }
+ if port == "" {
+ return errors.New("port is empty")
+ }
+ if _, err := strconv.ParseUint(port, 10, 16); err != nil {
+ return fmt.Errorf("port is not a valid number: %w", err)
+ }
+ if host == "localhost" {
+ return errors.New("localhost is not a valid address")
+ }
+
+ ip := net.ParseIP(host)
+ if ip != nil {
+ if ip.IsLoopback() {
+ return errors.New("loopback address is not a valid address")
+ }
+ if ip.IsPrivate() {
+ return errors.New("private address is not a valid address")
+ }
+ return nil
+ }
+
+ idnaValidator := idna.New(
+ idna.ValidateLabels(true),
+ idna.VerifyDNSLength(true),
+ idna.StrictDomainName(true),
+ idna.CheckHyphens(true),
+ )
+ if _, err := idnaValidator.ToASCII(host); err != nil {
+ return fmt.Errorf("invalid hostname: %w", err)
+ }
+
+ return nil
+}
+
+func batchStoreExists(s storage.StateStorer) (bool, error) {
+ hasOne := false
+ err := s.Iterate("batchstore_", func(key, value []byte) (stop bool, err error) {
+ hasOne = true
+ return true, nil
+ })
+
+ return hasOne, err
+}
diff --git a/pkg/node/node_test.go b/pkg/node/node_test.go
new file mode 100644
index 00000000000..5b5ca7ad967
--- /dev/null
+++ b/pkg/node/node_test.go
@@ -0,0 +1,109 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package node_test
+
+import (
+ "testing"
+
+ "github.com/ethersphere/bee/v2/pkg/node"
+)
+
+func TestValidatePublicAddress(t *testing.T) {
+ t.Parallel()
+
+ testCases := []struct {
+ name string
+ addr string
+ expErr bool
+ }{
+ {
+ name: "empty host",
+ addr: ":1635",
+ expErr: true,
+ },
+ {
+ name: "localhost",
+ addr: "localhost:1635",
+ expErr: true,
+ },
+ {
+ name: "loopback",
+ addr: "127.0.0.1:1635",
+ expErr: true,
+ },
+ {
+ name: "loopback ipv6",
+ addr: "[::1]:1635",
+ expErr: true,
+ },
+ {
+ name: "missing port",
+ addr: "1.2.3.4",
+ expErr: true,
+ },
+ {
+ name: "empty port",
+ addr: "1.2.3.4:",
+ expErr: true,
+ },
+ {
+ name: "invalid port number",
+ addr: "1.2.3.4:abc",
+ expErr: true,
+ },
+ {
+ name: "valid",
+ addr: "1.2.3.4:1635",
+ expErr: false,
+ },
+ {
+ name: "valid ipv6",
+ addr: "[2001:db8::1]:1635",
+ expErr: false,
+ },
+ {
+ name: "empty",
+ addr: "",
+ expErr: false,
+ },
+ {
+ name: "valid hostname",
+ addr: "example.com:8080",
+ expErr: false,
+ },
+ {
+ name: "valid hostname with hyphen",
+ addr: "test-example.com:8080",
+ expErr: false,
+ },
+ {
+ name: "private IP",
+ addr: "192.168.1.1:8080",
+ expErr: true,
+ },
+ {
+ name: "invalid hostname format",
+ addr: "invalid..hostname:8080",
+ expErr: true,
+ },
+ {
+ name: "hostname starts with hyphen",
+ addr: "-test.com:8080",
+ expErr: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ err := node.ValidatePublicAddress(tc.addr)
+ if tc.expErr && err == nil {
+ t.Fatal("expected error, but got none")
+ }
+ if !tc.expErr && err != nil {
+ t.Fatalf("expected no error, but got: %v", err)
+ }
+ })
+ }
+}
diff --git a/pkg/node/snapshot.go b/pkg/node/snapshot.go
index aac0a6eb140..ef190c0e26c 100644
--- a/pkg/node/snapshot.go
+++ b/pkg/node/snapshot.go
@@ -50,6 +50,10 @@ func NewSnapshotLogFilterer(logger log.Logger, getter SnapshotGetter) *SnapshotL
}
}
+func (f *SnapshotLogFilterer) GetBatchSnapshot() []byte {
+ return f.getter.GetBatchSnapshot()
+}
+
// loadSnapshot is responsible for loading and processing the snapshot data.
// It is intended to be called exactly once by initOnce.Do.
func (f *SnapshotLogFilterer) loadSnapshot() error {
diff --git a/pkg/node/snapshot_test.go b/pkg/node/snapshot_test.go
index 81edbcaad5f..eab031067fd 100644
--- a/pkg/node/snapshot_test.go
+++ b/pkg/node/snapshot_test.go
@@ -20,6 +20,8 @@ import (
"github.com/ethersphere/bee/v2/pkg/postage/listener"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ archive "github.com/ethersphere/batch-archive"
)
type mockSnapshotGetter struct {
@@ -33,6 +35,12 @@ func (m mockSnapshotGetter) GetBatchSnapshot() []byte {
return m.data
}
+type realSnapshotGetter struct{}
+
+func (r realSnapshotGetter) GetBatchSnapshot() []byte {
+ return archive.GetBatchSnapshot()
+}
+
func makeSnapshotData(logs []types.Log) []byte {
var buf bytes.Buffer
gz := gzip.NewWriter(&buf)
@@ -149,3 +157,77 @@ func TestNewSnapshotLogFilterer(t *testing.T) {
assert.Equal(t, 0, res[3].Topics[0].Cmp(common.HexToHash("0xa4")))
})
}
+
+func TestSnapshotLogFilterer_RealSnapshot(t *testing.T) {
+ t.Parallel()
+
+ getter := realSnapshotGetter{}
+ filterer := node.NewSnapshotLogFilterer(log.Noop, getter)
+
+ t.Run("block number", func(t *testing.T) {
+ blockNumber, err := filterer.BlockNumber(context.Background())
+ assert.NoError(t, err)
+ assert.Greater(t, blockNumber, uint64(0))
+ })
+
+ t.Run("filter range", func(t *testing.T) {
+ // arbitrary range that should exist in the snapshot
+ from := big.NewInt(20000000)
+ to := big.NewInt(20001000)
+ res, err := filterer.FilterLogs(context.Background(), ethereum.FilterQuery{
+ FromBlock: from,
+ ToBlock: to,
+ })
+ require.NoError(t, err)
+ for _, l := range res {
+ assert.GreaterOrEqual(t, l.BlockNumber, from.Uint64())
+ assert.LessOrEqual(t, l.BlockNumber, to.Uint64())
+ }
+ })
+
+ t.Run("filter address mismatch", func(t *testing.T) {
+ // random address that should not match the postage stamp contract
+ addr := common.HexToAddress("0x1234567890123456789012345678901234567890")
+ res, err := filterer.FilterLogs(context.Background(), ethereum.FilterQuery{
+ Addresses: []common.Address{addr},
+ })
+ require.NoError(t, err)
+ assert.Empty(t, res)
+ })
+}
+
+func BenchmarkNewSnapshotLogFilterer_Load(b *testing.B) {
+ getter := realSnapshotGetter{}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ filterer := node.NewSnapshotLogFilterer(log.Noop, getter)
+ _, err := filterer.BlockNumber(context.Background())
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkSnapshotLogFilterer(b *testing.B) {
+ getter := realSnapshotGetter{}
+ filterer := node.NewSnapshotLogFilterer(log.Noop, getter)
+ // ensure loaded
+ if _, err := filterer.BlockNumber(context.Background()); err != nil {
+ b.Fatal(err)
+ }
+
+ b.Run("FilterLogs", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ from := big.NewInt(20000000)
+ to := big.NewInt(20001000)
+ _, err := filterer.FilterLogs(context.Background(), ethereum.FilterQuery{
+ FromBlock: from,
+ ToBlock: to,
+ })
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
diff --git a/pkg/p2p/error.go b/pkg/p2p/error.go
index 5ce54d12e10..4ec8e2cfea3 100644
--- a/pkg/p2p/error.go
+++ b/pkg/p2p/error.go
@@ -20,6 +20,8 @@ var (
ErrDialLightNode = errors.New("target peer is a light node")
// ErrPeerBlocklisted is returned if peer is on blocklist
ErrPeerBlocklisted = errors.New("peer blocklisted")
+ // ErrUnsupportedAddresses is returned when all peer addresses use unsupported transports
+ ErrUnsupportedAddresses = errors.New("no supported addresses")
)
const (
diff --git a/pkg/p2p/libp2p/composite_resolver_test.go b/pkg/p2p/libp2p/composite_resolver_test.go
new file mode 100644
index 00000000000..43a87319807
--- /dev/null
+++ b/pkg/p2p/libp2p/composite_resolver_test.go
@@ -0,0 +1,162 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package libp2p_test
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/handshake"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// mockResolver is a mock implementation of handshake.AdvertisableAddressResolver.
+type mockResolver struct {
+ resolveFunc func(ma.Multiaddr) (ma.Multiaddr, error)
+ called bool
+}
+
+func (m *mockResolver) Resolve(addr ma.Multiaddr) (ma.Multiaddr, error) {
+ m.called = true
+ if m.resolveFunc != nil {
+ return m.resolveFunc(addr)
+ }
+ return addr, nil
+}
+
+func TestCompositeAddressResolver(t *testing.T) {
+ t.Parallel()
+
+ resolvedAddr, _ := ma.NewMultiaddr("/ip4/1.1.1.1/tcp/1634")
+
+ tests := []struct {
+ name string
+ addr string
+ setup func() (handshake.AdvertisableAddressResolver, *mockResolver, *mockResolver)
+ wantTCPCalled bool
+ wantWSSCalled bool
+ wantErr error
+ wantResolved bool
+ }{
+ {
+ name: "wss address",
+ addr: "/ip4/10.233.99.40/tcp/1635/tls/sni/*.libp2p.direct/ws/p2p/QmWbXocGMpfa8zApx9kCNwfmc35bbRJv136bdtuQjbR4wL",
+ setup: func() (handshake.AdvertisableAddressResolver, *mockResolver, *mockResolver) {
+ tcpR := &mockResolver{}
+ wssR := &mockResolver{
+ resolveFunc: func(addr ma.Multiaddr) (ma.Multiaddr, error) {
+ return resolvedAddr, nil
+ },
+ }
+ return libp2p.NewCompositeAddressResolver(tcpR, wssR), tcpR, wssR
+ },
+ wantWSSCalled: true,
+ wantResolved: true,
+ },
+ {
+ name: "tcp address",
+ addr: "/ip4/10.233.99.40/tcp/1634/p2p/QmWbXocGMpfa8zApx9kCNwfmc35bbRJv136bdtuQjbR4wL",
+ setup: func() (handshake.AdvertisableAddressResolver, *mockResolver, *mockResolver) {
+ tcpR := &mockResolver{
+ resolveFunc: func(addr ma.Multiaddr) (ma.Multiaddr, error) {
+ return resolvedAddr, nil
+ },
+ }
+ wssR := &mockResolver{}
+ return libp2p.NewCompositeAddressResolver(tcpR, wssR), tcpR, wssR
+ },
+ wantTCPCalled: true,
+ wantResolved: true,
+ },
+ {
+ name: "deprecated wss address",
+ addr: "/ip4/10.233.99.40/tcp/1635/wss/p2p/QmWbXocGMpfa8zApx9kCNwfmc35bbRJv136bdtuQjbR4wL",
+ setup: func() (handshake.AdvertisableAddressResolver, *mockResolver, *mockResolver) {
+ tcpR := &mockResolver{
+ resolveFunc: func(addr ma.Multiaddr) (ma.Multiaddr, error) {
+ return resolvedAddr, nil
+ },
+ }
+ wssR := &mockResolver{}
+ return libp2p.NewCompositeAddressResolver(tcpR, wssR), tcpR, wssR
+ },
+ wantTCPCalled: true,
+ wantResolved: true,
+ },
+ {
+ name: "ws without tls",
+ addr: "/ip4/10.233.99.40/tcp/1635/ws/p2p/QmWbXocGMpfa8zApx9kCNwfmc35bbRJv136bdtuQjbR4wL",
+ setup: func() (handshake.AdvertisableAddressResolver, *mockResolver, *mockResolver) {
+ tcpR := &mockResolver{
+ resolveFunc: func(addr ma.Multiaddr) (ma.Multiaddr, error) {
+ return resolvedAddr, nil
+ },
+ }
+ wssR := &mockResolver{}
+ return libp2p.NewCompositeAddressResolver(tcpR, wssR), tcpR, wssR
+ },
+ wantTCPCalled: true,
+ wantResolved: true,
+ },
+ {
+ name: "nil wss resolver",
+ addr: "/ip4/10.233.99.40/tcp/1635/tls/sni/*.libp2p.direct/ws/p2p/QmWbXocGMpfa8zApx9kCNwfmc35bbRJv136bdtuQjbR4wL",
+ setup: func() (handshake.AdvertisableAddressResolver, *mockResolver, *mockResolver) {
+ tcpR := &mockResolver{}
+ return libp2p.NewCompositeAddressResolver(tcpR, nil), tcpR, nil
+ },
+ wantWSSCalled: false,
+ wantTCPCalled: false,
+ wantResolved: false,
+ },
+ {
+ name: "nil tcp resolver",
+ addr: "/ip4/10.233.99.40/tcp/1634/p2p/QmWbXocGMpfa8zApx9kCNwfmc35bbRJv136bdtuQjbR4wL",
+ setup: func() (handshake.AdvertisableAddressResolver, *mockResolver, *mockResolver) {
+ wssR := &mockResolver{}
+ return libp2p.NewCompositeAddressResolver(nil, wssR), nil, wssR
+ },
+ wantTCPCalled: false,
+ wantWSSCalled: false,
+ wantResolved: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ resolver, tcpResolver, wssResolver := tc.setup()
+
+ addr, err := ma.NewMultiaddr(tc.addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ got, err := resolver.Resolve(addr)
+ if !errors.Is(err, tc.wantErr) {
+ t.Errorf("got error %v, want %v", err, tc.wantErr)
+ }
+
+ if tcpResolver != nil && tc.wantTCPCalled != tcpResolver.called {
+ t.Errorf("tcpResolver called: got %v, want %v", tcpResolver.called, tc.wantTCPCalled)
+ }
+ if wssResolver != nil && tc.wantWSSCalled != wssResolver.called {
+ t.Errorf("wssResolver called: got %v, want %v", wssResolver.called, tc.wantWSSCalled)
+ }
+
+ if tc.wantResolved {
+ if !got.Equal(resolvedAddr) {
+ t.Errorf("got resolved addr %s, want %s", got, resolvedAddr)
+ }
+ } else {
+ if !got.Equal(addr) {
+ t.Errorf("got addr %s, want %s", got, addr)
+ }
+ }
+ })
+ }
+}
diff --git a/pkg/p2p/libp2p/connections_test.go b/pkg/p2p/libp2p/connections_test.go
index c85b016d801..a31eb908f15 100644
--- a/pkg/p2p/libp2p/connections_test.go
+++ b/pkg/p2p/libp2p/connections_test.go
@@ -13,9 +13,12 @@ import (
"reflect"
"strings"
"sync"
+ "sync/atomic"
"testing"
"time"
+ "github.com/ethersphere/bee/v2/pkg/bzz"
+
"github.com/ethersphere/bee/v2/pkg/addressbook"
"github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/p2p"
@@ -25,6 +28,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/statestore/mock"
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/ethersphere/bee/v2/pkg/topology/lightnode"
+ libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
libp2pm "github.com/libp2p/go-libp2p"
@@ -32,9 +36,12 @@ import (
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
ma "github.com/multiformats/go-multiaddr"
+
+ libp2pmock "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/mock"
)
const (
@@ -59,8 +66,7 @@ func TestAddresses(t *testing.T) {
func TestConnectDisconnect(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -85,11 +91,46 @@ func TestConnectDisconnect(t *testing.T) {
expectPeersEventually(t, s1)
}
+// TestConnectSelf verifies that a service cannot connect to itself,
+// preventing self-connection attempts.
+func TestConnectSelf(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+
+ s1, _ := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
+ FullNode: true,
+ }})
+
+ // Get own underlay addresses
+ addr := serviceUnderlayAddress(t, s1)
+
+ // Attempt to connect to self
+ bzzAddr, err := s1.Connect(ctx, addr)
+
+ // Should return an error
+ if err == nil {
+ t.Fatal("expected error when connecting to self, got nil")
+ }
+
+ // Should contain "cannot connect to self" in error message
+ if !strings.Contains(err.Error(), "cannot connect to self") {
+ t.Fatalf("expected 'cannot connect to self' error, got: %v", err)
+ }
+
+ // bzzAddr should be nil
+ if bzzAddr != nil {
+ t.Fatal("expected nil bzz address when connecting to self")
+ }
+
+ // Verify no peers are connected
+ expectPeers(t, s1)
+}
+
func TestConnectToLightPeer(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, _ := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: false,
@@ -110,8 +151,7 @@ func TestConnectToLightPeer(t *testing.T) {
func TestLightPeerLimit(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
var (
limit = 3
@@ -129,7 +169,7 @@ func TestLightPeerLimit(t *testing.T) {
addr := serviceUnderlayAddress(t, sf)
- for i := 0; i < 5; i++ {
+ for range 5 {
sl, _ := newService(t, 1, libp2pServiceOpts{
notifier: notifier,
libp2pOpts: libp2p.Options{
@@ -160,8 +200,7 @@ func TestStreamsMaxIncomingLimit(t *testing.T) {
maxIncomingStreams := 5000
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -244,7 +283,7 @@ func TestStreamsMaxIncomingLimit(t *testing.T) {
// close random streams to validate new streams creation
random := rand.New(rand.NewSource(time.Now().UnixNano()))
- for i := 0; i < closeStreamCount; i++ {
+ for range closeStreamCount {
n := random.Intn(len(streams))
if err := streams[n].Reset(); err != nil {
t.Error(err)
@@ -283,8 +322,7 @@ func TestStreamsMaxIncomingLimit(t *testing.T) {
func TestDoubleConnect(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -311,8 +349,7 @@ func TestDoubleConnect(t *testing.T) {
func TestDoubleDisconnect(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -347,8 +384,7 @@ func TestDoubleDisconnect(t *testing.T) {
func TestMultipleConnectDisconnect(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -392,8 +428,7 @@ func TestMultipleConnectDisconnect(t *testing.T) {
func TestConnectDisconnectOnAllAddresses(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -406,7 +441,7 @@ func TestConnectDisconnectOnAllAddresses(t *testing.T) {
t.Fatal(err)
}
for _, addr := range addrs {
- bzzAddr, err := s2.Connect(ctx, addr)
+ bzzAddr, err := s2.Connect(ctx, []ma.Multiaddr{addr})
if err != nil {
t.Fatal(err)
}
@@ -426,8 +461,7 @@ func TestConnectDisconnectOnAllAddresses(t *testing.T) {
func TestDoubleConnectOnAllAddresses(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{
notifier: mockNotifier(noopCf, noopDf, true),
@@ -443,14 +477,14 @@ func TestDoubleConnectOnAllAddresses(t *testing.T) {
// creating new remote host for each address
s2, overlay2 := newService(t, 1, libp2pServiceOpts{notifier: mockNotifier(noopCf, noopDf, true)})
- if _, err := s2.Connect(ctx, addr); err != nil {
+ if _, err := s2.Connect(ctx, []ma.Multiaddr{addr}); err != nil {
t.Fatal(err)
}
expectPeers(t, s2, overlay1)
expectPeersEventually(t, s1, overlay2)
- if _, err := s2.Connect(ctx, addr); !errors.Is(err, p2p.ErrAlreadyConnected) {
+ if _, err := s2.Connect(ctx, []ma.Multiaddr{addr}); !errors.Is(err, p2p.ErrAlreadyConnected) {
t.Fatalf("expected %s error, got %s error", p2p.ErrAlreadyConnected, err)
}
@@ -471,8 +505,7 @@ func TestDoubleConnectOnAllAddresses(t *testing.T) {
func TestDifferentNetworkIDs(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, _ := newService(t, 1, libp2pServiceOpts{})
s2, _ := newService(t, 2, libp2pServiceOpts{})
@@ -490,8 +523,7 @@ func TestDifferentNetworkIDs(t *testing.T) {
func TestConnectWithEnabledWSTransports(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{
libp2pOpts: libp2p.Options{
@@ -507,6 +539,15 @@ func TestConnectWithEnabledWSTransports(t *testing.T) {
},
})
+ defer func() {
+ if err := s1.Close(); err != nil {
+ t.Errorf("s1.Close: %v", err)
+ }
+ if err := s2.Close(); err != nil {
+ t.Errorf("s2.Close: %v", err)
+ }
+ }()
+
addr := serviceUnderlayAddress(t, s1)
if _, err := s2.Connect(ctx, addr); err != nil {
@@ -521,17 +562,15 @@ func TestConnectWithEnabledWSTransports(t *testing.T) {
func TestConnectRepeatHandshake(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
}})
s2, overlay2 := newService(t, 1, libp2pServiceOpts{})
- addr := serviceUnderlayAddress(t, s1)
-
- _, err := s2.Connect(ctx, addr)
+ addrs := serviceUnderlayAddress(t, s1)
+ _, err := s2.Connect(ctx, addrs)
if err != nil {
t.Fatal(err)
}
@@ -539,7 +578,7 @@ func TestConnectRepeatHandshake(t *testing.T) {
expectPeers(t, s2, overlay1)
expectPeersEventually(t, s1, overlay2)
- info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
+ info, err := libp2ppeer.AddrInfoFromP2pAddr(addrs[0])
if err != nil {
t.Fatal(err)
}
@@ -549,7 +588,7 @@ func TestConnectRepeatHandshake(t *testing.T) {
t.Fatal(err)
}
- if _, err := s2.HandshakeService().Handshake(ctx, s2.WrapStream(stream), info.Addrs[0], info.ID); err != nil {
+ if _, err := s2.HandshakeService().Handshake(ctx, s2.WrapStream(stream), addrs); err != nil {
t.Fatal(err)
}
@@ -634,8 +673,8 @@ func TestBlocklistedPeers(t *testing.T) {
FullNode: true,
}})
s2, _ := newService(t, 1, libp2pServiceOpts{})
- addr1 := serviceUnderlayAddress(t, s1)
- _, err := s2.Connect(context.Background(), addr1)
+ addrs1 := serviceUnderlayAddress(t, s1)
+ _, err := s2.Connect(context.Background(), addrs1)
if err != nil {
t.Fatal(err)
}
@@ -726,10 +765,10 @@ func TestTopologyNotifier(t *testing.T) {
})
s2.SetPickyNotifier(notifier2)
- addr := serviceUnderlayAddress(t, s1)
+ s1Addr := serviceUnderlayAddress(t, s1)
// s2 connects to s1, thus the notifier on s1 should be called on Connect
- bzzAddr, err := s2.Connect(ctx, addr)
+ bzzAddr, err := s2.Connect(ctx, s1Addr)
if err != nil {
t.Fatal(err)
}
@@ -745,7 +784,7 @@ func TestTopologyNotifier(t *testing.T) {
mtx.Unlock()
// check address book entries are there
- checkAddressbook(t, ab2, overlay1, addr)
+ checkAddressbook(t, ab2, overlay1, s1Addr)
// s2 disconnects from s1 so s1 disconnect notifiee should be called
if err := s2.Disconnect(bzzAddr.Overlay, testDisconnectMsg); err != nil {
@@ -785,6 +824,85 @@ func TestTopologyNotifier(t *testing.T) {
waitAddrSet(t, &n2disconnectedPeer.Address, &mtx, overlay1)
}
+func TestConnectWithAutoTLS(t *testing.T) {
+ t.Parallel()
+
+ certLoaded := make(chan struct{})
+ mockCertMgr := libp2pmock.NewMockP2PForgeCertMgr(func() {
+ close(certLoaded)
+ })
+
+ s1, _ := newService(t, 1, libp2pServiceOpts{
+ libp2pOpts: libp2p.Options{
+ EnableWS: true,
+ EnableWSS: true,
+ FullNode: true,
+ WSSAddr: ":0",
+ NATAddr: "127.0.0.1:1635",
+ NATWSSAddr: "127.0.0.1:1635",
+ },
+ autoTLSCertManager: mockCertMgr,
+ })
+
+ select {
+ case <-certLoaded:
+ case <-time.After(time.Second):
+ t.Fatal("onCertLoaded callback was not triggered")
+ }
+
+ if s1 == nil {
+ t.Fatal("service should not be nil")
+ }
+}
+
+func TestConnectWithAutoTLSAndWSTransports(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+
+ s1, overlay1 := newService(t, 1, libp2pServiceOpts{
+ libp2pOpts: libp2p.Options{
+ EnableWS: true,
+ EnableWSS: true,
+ FullNode: true,
+ WSSAddr: ":0",
+ NATAddr: "127.0.0.1:1635",
+ NATWSSAddr: "127.0.0.1:1635",
+ },
+ autoTLSCertManager: libp2pmock.NewMockP2PForgeCertMgr(nil),
+ })
+
+ s2, overlay2 := newService(t, 1, libp2pServiceOpts{
+ libp2pOpts: libp2p.Options{
+ EnableWS: true,
+ EnableWSS: true,
+ FullNode: true,
+ WSSAddr: ":0",
+ NATAddr: "127.0.0.1:1636",
+ NATWSSAddr: "127.0.0.1:1636",
+ },
+ autoTLSCertManager: libp2pmock.NewMockP2PForgeCertMgr(nil),
+ })
+
+ defer func() {
+ if err := s1.Close(); err != nil {
+ t.Errorf("s1.Close: %v", err)
+ }
+ if err := s2.Close(); err != nil {
+ t.Errorf("s2.Close: %v", err)
+ }
+ }()
+
+ addr := serviceUnderlayAddress(t, s1)
+
+ if _, err := s2.Connect(ctx, addr); err != nil {
+ t.Fatal(err)
+ }
+
+ expectPeers(t, s2, overlay1)
+ expectPeersEventually(t, s1, overlay2)
+}
+
// TestTopologyAnnounce checks that announcement
// works correctly for full nodes and light nodes.
func TestTopologyAnnounce(t *testing.T) {
@@ -852,7 +970,7 @@ func TestTopologyAnnounce(t *testing.T) {
expectPeersEventually(t, s1, overlay3)
called := false
- for i := 0; i < 20; i++ {
+ for range 20 {
mtx.Lock()
called = announceCalled
mtx.Unlock()
@@ -864,7 +982,7 @@ func TestTopologyAnnounce(t *testing.T) {
if !called {
t.Error("expected announce to be called")
}
- for i := 0; i < 10; i++ {
+ for range 10 {
mtx.Lock()
called = announceToCalled
mtx.Unlock()
@@ -890,7 +1008,7 @@ func TestTopologyAnnounce(t *testing.T) {
expectPeers(t, s2, overlay1)
expectPeersEventually(t, s1, overlay2, overlay3)
- for i := 0; i < 20; i++ {
+ for range 20 {
mtx.Lock()
called = announceToCalled
mtx.Unlock()
@@ -906,8 +1024,6 @@ func TestTopologyAnnounce(t *testing.T) {
}
func TestTopologyOverSaturated(t *testing.T) {
- t.Parallel()
-
var (
mtx sync.Mutex
ctx = context.Background()
@@ -954,10 +1070,9 @@ func TestTopologyOverSaturated(t *testing.T) {
addr := serviceUnderlayAddress(t, s1)
// s2 connects to s1, thus the notifier on s1 should be called on Connect
- _, err := s2.Connect(ctx, addr)
- if err == nil {
- t.Fatal("expected connect to fail but it didn't")
- }
+ // Connect might return nil if the handshake completes before the server processes the rejection (protocol race).
+ // We verify that the peer is eventually disconnected.
+ _, _ = s2.Connect(ctx, addr)
expectPeers(t, s1)
expectPeersEventually(t, s2)
@@ -970,8 +1085,7 @@ func TestWithDisconnectStreams(t *testing.T) {
const headersRWTimeout = 60 * time.Second
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -1019,10 +1133,7 @@ func TestWithDisconnectStreams(t *testing.T) {
}
func TestWithBlocklistStreams(t *testing.T) {
- t.Parallel()
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -1062,9 +1173,10 @@ func TestWithBlocklistStreams(t *testing.T) {
expectPeersEventually(t, s2)
expectPeersEventually(t, s1)
- if _, err := s2.Connect(ctx, s1_underlay); err == nil {
- t.Fatal("expected error when connecting to blocklisted peer")
- }
+ // s2 connects to s1, but because of blocklist it should fail
+ // Connect might return nil if the handshake completes before the server processes the blocklist (protocol race).
+ // We verify that the peer is eventually disconnected.
+ _, _ = s2.Connect(ctx, s1_underlay)
expectPeersEventually(t, s2)
expectPeersEventually(t, s1)
@@ -1073,8 +1185,7 @@ func TestWithBlocklistStreams(t *testing.T) {
func TestUserAgentLogging(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
// use concurrent-safe buffers as handlers are logging concurrently
s1Logs := new(buffer)
@@ -1271,7 +1382,7 @@ func waitAddrSet(t *testing.T, addr *swarm.Address, mtx *sync.Mutex, exp swarm.A
}
}
-func checkAddressbook(t *testing.T, ab addressbook.Getter, overlay swarm.Address, underlay ma.Multiaddr) {
+func checkAddressbook(t *testing.T, ab addressbook.Getter, overlay swarm.Address, underlays []ma.Multiaddr) {
t.Helper()
addr, err := ab.Get(overlay)
if err != nil {
@@ -1281,8 +1392,8 @@ func checkAddressbook(t *testing.T, ab addressbook.Getter, overlay swarm.Address
t.Fatalf("overlay mismatch. got %s want %s", addr.Overlay, overlay)
}
- if !addr.Underlay.Equal(underlay) {
- t.Fatalf("underlay mismatch. got %s, want %s", addr.Underlay, underlay)
+ if !bzz.AreUnderlaysEqual(addr.Underlays, underlays) {
+ t.Fatalf("underlay mismatch. got %s, want %s", addr.Underlays, underlays)
}
}
@@ -1377,3 +1488,122 @@ var (
noopReachability = func(p2p.ReachabilityStatus) {}
noopReachable = func(swarm.Address, p2p.ReachabilityStatus) {}
)
+
+func TestPeerMultiaddrsNoFallback(t *testing.T) {
+ t.Parallel()
+
+ s1, _ := newService(t, 1, libp2pServiceOpts{})
+
+ privKey, _, err := libp2pcrypto.GenerateEd25519Key(rand.New(rand.NewSource(time.Now().UnixNano())))
+ if err != nil {
+ t.Fatal(err)
+ }
+ unknownPeerID, err := libp2ppeer.IDFromPrivateKey(privKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ addrs, err := s1.PeerMultiaddrs(ctx, unknownPeerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(addrs) != 0 {
+ t.Fatalf("expected no addresses for unknown peer, got %v", addrs)
+ }
+}
+
+type emptyAddrsPeerstore struct {
+ peerstore.Peerstore
+ targetPeerID libp2ppeer.ID
+ mu sync.RWMutex
+}
+
+func (p *emptyAddrsPeerstore) setTarget(id libp2ppeer.ID) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.targetPeerID = id
+}
+
+func (p *emptyAddrsPeerstore) isTarget(id libp2ppeer.ID) bool {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.targetPeerID == id
+}
+
+func (p *emptyAddrsPeerstore) Addrs(id libp2ppeer.ID) []ma.Multiaddr {
+ if p.isTarget(id) {
+ return nil
+ }
+ return p.Peerstore.Addrs(id)
+}
+
+func (p *emptyAddrsPeerstore) AddrStream(ctx context.Context, id libp2ppeer.ID) <-chan ma.Multiaddr {
+ if p.isTarget(id) {
+ ch := make(chan ma.Multiaddr)
+ go func() {
+ <-ctx.Done()
+ close(ch)
+ }()
+ return ch
+ }
+ return p.Peerstore.AddrStream(ctx, id)
+}
+
+type emptyAddrsHost struct {
+ host.Host
+ ps *emptyAddrsPeerstore
+}
+
+func (h *emptyAddrsHost) Peerstore() peerstore.Peerstore {
+ return h.ps
+}
+
+func TestConnectEmptyPeerstoreSkipsAddressbookAndReacher(t *testing.T) {
+ t.Parallel()
+
+ ab1 := addressbook.New(mock.NewStateStore())
+
+ s1, _ := newService(t, 1, libp2pServiceOpts{
+ Addressbook: ab1,
+ libp2pOpts: libp2p.Options{
+ FullNode: true,
+ },
+ })
+
+ psWrapper := &emptyAddrsPeerstore{Peerstore: s1.Host().Peerstore()}
+
+ var reachableCalled atomic.Bool
+ notifier1 := mockNotifier(noopCf, noopDf, true)
+ notifier1.(*notifiee).reachable = func(_ swarm.Address, _ p2p.ReachabilityStatus) {
+ reachableCalled.Store(true)
+ }
+ s1.SetPickyNotifier(notifier1)
+
+ s2, overlay2 := newService(t, 1, libp2pServiceOpts{
+ libp2pOpts: libp2p.Options{
+ FullNode: true,
+ },
+ })
+
+ psWrapper.setTarget(s2.Host().ID())
+ s1.SetHost(&emptyAddrsHost{Host: s1.Host(), ps: psWrapper})
+
+ _, err := s2.Connect(context.Background(), serviceUnderlayAddress(t, s1))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectPeersEventually(t, s1, overlay2)
+
+ _, err = ab1.Get(overlay2)
+ if err == nil {
+ t.Fatal("expected addressbook to have no entry for NAT peer, but found one")
+ }
+
+ if reachableCalled.Load() {
+ t.Fatal("expected reacher not to be notified for NAT peer")
+ }
+}
diff --git a/pkg/p2p/libp2p/export_test.go b/pkg/p2p/libp2p/export_test.go
index f78b9f21275..3f56f7cc16e 100644
--- a/pkg/p2p/libp2p/export_test.go
+++ b/pkg/p2p/libp2p/export_test.go
@@ -7,11 +7,13 @@ package libp2p
import (
"context"
+ "github.com/ethersphere/bee/v2/pkg/bzz"
handshake "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/handshake"
libp2pm "github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
)
func (s *Service) HandshakeService() *handshake.Service {
@@ -30,6 +32,10 @@ func (s *Service) Host() host.Host {
return s.host
}
+func (s *Service) SetHost(h host.Host) {
+ s.host = h
+}
+
type StaticAddressResolver = staticAddressResolver
var (
@@ -42,3 +48,33 @@ func WithHostFactory(factory func(...libp2pm.Option) (host.Host, error)) Options
hostFactory: factory,
}
}
+
+func WithAutoTLSCertManager(m autoTLSCertManager) Options {
+ return Options{
+ autoTLSCertManager: m,
+ }
+}
+
+func SetAutoTLSCertManager(o *Options, m autoTLSCertManager) {
+ o.autoTLSCertManager = m
+}
+
+type AutoTLSCertManager = autoTLSCertManager
+
+var NewCompositeAddressResolver = newCompositeAddressResolver
+
+func (s *Service) FilterSupportedAddresses(addrs []ma.Multiaddr) []ma.Multiaddr {
+ return s.filterSupportedAddresses(addrs)
+}
+
+func (s *Service) PeerMultiaddrs(ctx context.Context, peerID libp2ppeer.ID) ([]ma.Multiaddr, error) {
+ return s.peerMultiaddrs(ctx, peerID)
+}
+
+func (s *Service) SetTransportFlags(hasTCP, hasWS, hasWSS bool) {
+ s.enabledTransports = map[bzz.TransportType]bool{
+ bzz.TransportTCP: hasTCP,
+ bzz.TransportWS: hasWS,
+ bzz.TransportWSS: hasWSS,
+ }
+}
diff --git a/pkg/p2p/libp2p/filter_addresses_test.go b/pkg/p2p/libp2p/filter_addresses_test.go
new file mode 100644
index 00000000000..a86b09e0b05
--- /dev/null
+++ b/pkg/p2p/libp2p/filter_addresses_test.go
@@ -0,0 +1,218 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package libp2p_test
+
+import (
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func mustMultiaddr(t *testing.T, s string) ma.Multiaddr {
+ t.Helper()
+ addr, err := ma.NewMultiaddr(s)
+ if err != nil {
+ t.Fatalf("failed to create multiaddr from %q: %v", s, err)
+ }
+ return addr
+}
+
+func TestFilterSupportedAddresses(t *testing.T) {
+ t.Parallel()
+
+ // Plain TCP addresses (IPv4)
+ tcpPrivate := "/ip4/10.233.99.120/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"
+ tcpPublic := "/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"
+ tcpLoopback := "/ip4/127.0.0.1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"
+ tcpIPv6Loopback := "/ip6/::1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"
+
+ // WSS addresses with TLS and SNI (full underlay format)
+ wssPrivate := "/ip4/10.233.99.120/tcp/1635/tls/sni/10-233-99-120.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"
+ wssPublic := "/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"
+ wssLoopback := "/ip4/127.0.0.1/tcp/1635/tls/sni/127-0-0-1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"
+ wssIPv6Loopback := "/ip6/::1/tcp/1635/tls/sni/0--1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"
+
+ // Plain WS address (no TLS)
+ wsPlain := "/ip4/127.0.0.1/tcp/1635/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj"
+
+ // All TCP addresses
+ allTCP := []string{tcpPrivate, tcpPublic, tcpLoopback, tcpIPv6Loopback}
+ // All WSS addresses
+ allWSS := []string{wssPrivate, wssPublic, wssLoopback, wssIPv6Loopback}
+
+ tests := []struct {
+ name string
+ hasTCP bool
+ hasWS bool
+ hasWSS bool
+ inputAddrs []string
+ expectedCount int
+ }{
+ {
+ name: "TCP only transport accepts all TCP addresses",
+ hasTCP: true,
+ hasWS: false,
+ hasWSS: false,
+ inputAddrs: allTCP,
+ expectedCount: 4,
+ },
+ {
+ name: "TCP only transport rejects WSS addresses",
+ hasTCP: true,
+ hasWS: false,
+ hasWSS: false,
+ inputAddrs: allWSS,
+ expectedCount: 0,
+ },
+ {
+ name: "WSS only transport accepts all WSS addresses",
+ hasTCP: false,
+ hasWS: false,
+ hasWSS: true,
+ inputAddrs: allWSS,
+ expectedCount: 4,
+ },
+ {
+ name: "WSS only transport rejects TCP addresses",
+ hasTCP: false,
+ hasWS: false,
+ hasWSS: true,
+ inputAddrs: allTCP,
+ expectedCount: 0,
+ },
+ {
+ name: "TCP and WSS transports accept mixed addresses",
+ hasTCP: true,
+ hasWS: false,
+ hasWSS: true,
+ inputAddrs: append(allTCP, allWSS...),
+ expectedCount: 8,
+ },
+ {
+ name: "WS transport accepts plain WS but not WSS",
+ hasTCP: false,
+ hasWS: true,
+ hasWSS: false,
+ inputAddrs: []string{wsPlain, wssPublic},
+ expectedCount: 1,
+ },
+ {
+ name: "WSS transport does not accept plain WS",
+ hasTCP: false,
+ hasWS: false,
+ hasWSS: true,
+ inputAddrs: []string{wsPlain},
+ expectedCount: 0,
+ },
+ {
+ name: "No transports reject all addresses",
+ hasTCP: false,
+ hasWS: false,
+ hasWSS: false,
+ inputAddrs: append(allTCP, allWSS...),
+ expectedCount: 0,
+ },
+ {
+ name: "Empty input returns empty output",
+ hasTCP: true,
+ hasWS: true,
+ hasWSS: true,
+ inputAddrs: []string{},
+ expectedCount: 0,
+ },
+ {
+ name: "Real node addresses with TCP and WSS",
+ hasTCP: true,
+ hasWS: false,
+ hasWSS: true,
+ inputAddrs: []string{tcpPrivate, wssPrivate, tcpPublic, wssPublic, tcpLoopback, wssLoopback, tcpIPv6Loopback, wssIPv6Loopback},
+ expectedCount: 8,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ // Create a service for testing
+ s, _ := newService(t, 1, libp2pServiceOpts{})
+
+ // Set transport flags
+ s.SetTransportFlags(tc.hasTCP, tc.hasWS, tc.hasWSS)
+
+ // Create multiaddrs from strings
+ addrs := make([]ma.Multiaddr, len(tc.inputAddrs))
+ for i, addrStr := range tc.inputAddrs {
+ addrs[i] = mustMultiaddr(t, addrStr)
+ }
+
+ // Filter addresses
+ filtered := s.FilterSupportedAddresses(addrs)
+
+ // Verify count
+ if len(filtered) != tc.expectedCount {
+ t.Errorf("expected %d addresses, got %d", tc.expectedCount, len(filtered))
+ }
+ })
+ }
+}
+
+func TestFilterSupportedAddresses_FullUnderlayAddresses(t *testing.T) {
+ t.Parallel()
+
+ // Complete set of underlay addresses from a real full underlay node
+ fullAddrs := []string{
+ "/ip4/10.233.99.120/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj",
+ "/ip4/10.233.99.120/tcp/1635/tls/sni/10-233-99-120.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj",
+ "/ip4/104.28.194.73/tcp/32002/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj",
+ "/ip4/104.28.194.73/tcp/32532/tls/sni/104-28-194-73.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj",
+ "/ip4/127.0.0.1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj",
+ "/ip4/127.0.0.1/tcp/1635/tls/sni/127-0-0-1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj",
+ "/ip6/::1/tcp/1634/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj",
+ "/ip6/::1/tcp/1635/tls/sni/0--1.k2k4r8pr3m3aug5nudg2y039qfj2gxw6wnlx0e0ghzxufcn38soyp9z4.libp2p.direct/ws/p2p/QmfSx1ujzboapD5h2CiqTJqUy46FeTDwXBszB3XUCfKEEj",
+ }
+
+ // Create multiaddrs
+ addrs := make([]ma.Multiaddr, len(fullAddrs))
+ for i, addrStr := range fullAddrs {
+ addrs[i] = mustMultiaddr(t, addrStr)
+ }
+
+ t.Run("TCP and WSS enabled accepts all full underlay addresses", func(t *testing.T) {
+ t.Parallel()
+ s, _ := newService(t, 1, libp2pServiceOpts{})
+ s.SetTransportFlags(true, false, true)
+
+ filtered := s.FilterSupportedAddresses(addrs)
+ // 4 TCP addresses + 4 WSS addresses = 8
+ if len(filtered) != 8 {
+ t.Errorf("expected 8 addresses, got %d", len(filtered))
+ }
+ })
+
+ t.Run("TCP only accepts half of full underlay addresses", func(t *testing.T) {
+ t.Parallel()
+ s, _ := newService(t, 1, libp2pServiceOpts{})
+ s.SetTransportFlags(true, false, false)
+
+ filtered := s.FilterSupportedAddresses(addrs)
+ // Only 4 TCP addresses
+ if len(filtered) != 4 {
+ t.Errorf("expected 4 addresses (TCP only), got %d", len(filtered))
+ }
+ })
+
+ t.Run("WSS only accepts half of full underlay addresses", func(t *testing.T) {
+ t.Parallel()
+ s, _ := newService(t, 1, libp2pServiceOpts{})
+ s.SetTransportFlags(false, false, true)
+
+ filtered := s.FilterSupportedAddresses(addrs)
+ // Only 4 WSS addresses
+ if len(filtered) != 4 {
+ t.Errorf("expected 4 addresses (WSS only), got %d", len(filtered))
+ }
+ })
+}
diff --git a/pkg/p2p/libp2p/headers_test.go b/pkg/p2p/libp2p/headers_test.go
index 5805dafa0d4..d46653bd719 100644
--- a/pkg/p2p/libp2p/headers_test.go
+++ b/pkg/p2p/libp2p/headers_test.go
@@ -23,8 +23,7 @@ func TestHeaders(t *testing.T) {
"other-key": []byte("other-value"),
}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -74,8 +73,7 @@ func TestHeaders(t *testing.T) {
func TestHeaders_empty(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -134,8 +132,7 @@ func TestHeadler(t *testing.T) {
"other-sent-key": []byte("other-sent-value"),
}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
diff --git a/pkg/p2p/libp2p/internal/blocklist/blocklist_test.go b/pkg/p2p/libp2p/internal/blocklist/blocklist_test.go
index 74a5ba98258..da80a9ebea1 100644
--- a/pkg/p2p/libp2p/internal/blocklist/blocklist_test.go
+++ b/pkg/p2p/libp2p/internal/blocklist/blocklist_test.go
@@ -111,7 +111,7 @@ func TestPeers(t *testing.T) {
func isIn(p swarm.Address, peers []p2p.BlockListedPeer, reason string, f bool) bool {
for _, v := range peers {
- if v.Address.Equal(p) && v.Reason == reason && v.Peer.FullNode == f {
+ if v.Address.Equal(p) && v.Reason == reason && v.FullNode == f {
return true
}
}
diff --git a/pkg/p2p/libp2p/internal/handshake/handshake.go b/pkg/p2p/libp2p/internal/handshake/handshake.go
index cb478c71d4a..e88dc02ef2b 100644
--- a/pkg/p2p/libp2p/internal/handshake/handshake.go
+++ b/pkg/p2p/libp2p/internal/handshake/handshake.go
@@ -5,9 +5,12 @@
package handshake
import (
+ "cmp"
"context"
"errors"
"fmt"
+ "slices"
+ "sync"
"sync/atomic"
"time"
@@ -60,6 +63,24 @@ type AdvertisableAddressResolver interface {
Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error)
}
+type Addresser interface {
+ AdvertizableAddrs() ([]ma.Multiaddr, error)
+}
+
+type Option struct {
+ bee260compatibility bool
+}
+
+// WithBee260Compatibility option ensures that only one underlay address is
+// passed to the peer in p2p protocol messages, so that nodes with version 2.6.0
+// and older can deserialize it. This option can be safely removed when bee
+// version 2.6.0 is deprecated.
+func WithBee260Compatibility(yes bool) func(*Option) {
+ return func(o *Option) {
+ o.bee260compatibility = yes
+ }
+}
+
// Service can perform initiate or handle a handshake between peers.
type Service struct {
signer crypto.Signer
@@ -74,6 +95,8 @@ type Service struct {
libp2pID libp2ppeer.ID
metrics metrics
picker p2p.Picker
+ mu sync.RWMutex
+ hostAddresser Addresser
}
// Info contains the information received from the handshake.
@@ -91,7 +114,7 @@ func (i *Info) LightString() string {
}
// New creates a new handshake Service.
-func New(signer crypto.Signer, advertisableAddresser AdvertisableAddressResolver, overlay swarm.Address, networkID uint64, fullNode bool, nonce []byte, welcomeMessage string, validateOverlay bool, ownPeerID libp2ppeer.ID, logger log.Logger) (*Service, error) {
+func New(signer crypto.Signer, advertisableAddresser AdvertisableAddressResolver, overlay swarm.Address, networkID uint64, fullNode bool, nonce []byte, hostAddresser Addresser, welcomeMessage string, validateOverlay bool, ownPeerID libp2ppeer.ID, logger log.Logger) (*Service, error) {
if len(welcomeMessage) > MaxWelcomeMessageLength {
return nil, ErrWelcomeMessageLength
}
@@ -107,6 +130,7 @@ func New(signer crypto.Signer, advertisableAddresser AdvertisableAddressResolver
libp2pID: ownPeerID,
logger: logger.WithName(loggerName).Register(),
metrics: newMetrics(),
+ hostAddresser: hostAddresser,
}
svc.welcomeMessage.Store(welcomeMessage)
@@ -114,29 +138,29 @@ func New(signer crypto.Signer, advertisableAddresser AdvertisableAddressResolver
}
func (s *Service) SetPicker(n p2p.Picker) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
s.picker = n
}
// Handshake initiates a handshake with a peer.
-func (s *Service) Handshake(ctx context.Context, stream p2p.Stream, peerMultiaddr ma.Multiaddr, peerID libp2ppeer.ID) (i *Info, err error) {
+func (s *Service) Handshake(ctx context.Context, stream p2p.Stream, peerMultiaddrs []ma.Multiaddr, opts ...func(*Option)) (i *Info, err error) {
loggerV1 := s.logger.V(1).Register()
+ o := new(Option)
+ for _, set := range opts {
+ set(o)
+ }
+
ctx, cancel := context.WithTimeout(ctx, handshakeTimeout)
defer cancel()
w, r := protobuf.NewWriterAndReader(stream)
- fullRemoteMA, err := buildFullMA(peerMultiaddr, peerID)
- if err != nil {
- return nil, err
- }
- fullRemoteMABytes, err := fullRemoteMA.MarshalBinary()
- if err != nil {
- return nil, err
- }
+ peerMultiaddrs = p2p.FilterBee260CompatibleUnderlays(o.bee260compatibility, peerMultiaddrs)
if err := w.WriteMsgWithContext(ctx, &pb.Syn{
- ObservedUnderlay: fullRemoteMABytes,
+ ObservedUnderlay: bzz.SerializeUnderlays(peerMultiaddrs),
}); err != nil {
return nil, fmt.Errorf("write syn message: %w", err)
}
@@ -146,32 +170,51 @@ func (s *Service) Handshake(ctx context.Context, stream p2p.Stream, peerMultiadd
return nil, fmt.Errorf("read synack message: %w", err)
}
- observedUnderlay, err := ma.NewMultiaddrBytes(resp.Syn.ObservedUnderlay)
+ observedUnderlays, err := bzz.DeserializeUnderlays(resp.Syn.ObservedUnderlay)
if err != nil {
return nil, ErrInvalidSyn
}
- observedUnderlayAddrInfo, err := libp2ppeer.AddrInfoFromP2pAddr(observedUnderlay)
- if err != nil {
- return nil, fmt.Errorf("extract addr from P2P: %w", err)
- }
+ advertisableUnderlays := make([]ma.Multiaddr, len(observedUnderlays))
+ for i, observedUnderlay := range observedUnderlays {
+ observedUnderlayAddrInfo, err := libp2ppeer.AddrInfoFromP2pAddr(observedUnderlay)
+ if err != nil {
+ return nil, fmt.Errorf("extract addr from P2P: %w", err)
+ }
- if s.libp2pID != observedUnderlayAddrInfo.ID {
- // NOTE eventually we will return error here, but for now we want to gather some statistics
- s.logger.Warning("received peer ID does not match ours", "their", observedUnderlayAddrInfo.ID, "ours", s.libp2pID)
- }
+ if s.libp2pID != observedUnderlayAddrInfo.ID {
+ return nil, fmt.Errorf("received peer ID %s does not match ours %s", observedUnderlayAddrInfo.ID.String(), s.libp2pID.String())
+ }
- advertisableUnderlay, err := s.advertisableAddresser.Resolve(observedUnderlay)
- if err != nil {
- return nil, err
+ advertisableUnderlay, err := s.advertisableAddresser.Resolve(observedUnderlay)
+ if err != nil {
+ return nil, err
+ }
+
+ advertisableUnderlays[i] = advertisableUnderlay
}
- bzzAddress, err := bzz.NewAddress(s.signer, advertisableUnderlay, s.overlay, s.networkID, s.nonce)
- if err != nil {
- return nil, err
+ if s.hostAddresser != nil {
+ hostAddrs, err := s.hostAddresser.AdvertizableAddrs()
+ if err != nil {
+ return nil, fmt.Errorf("get host advertizable addresses: %w", err)
+ }
+
+ advertisableUnderlays = append(advertisableUnderlays, hostAddrs...)
}
- advertisableUnderlayBytes, err := bzzAddress.Underlay.MarshalBinary()
+ // sort to remove potential duplicates
+ slices.SortFunc(advertisableUnderlays, func(a, b ma.Multiaddr) int {
+ return cmp.Compare(a.String(), b.String())
+ })
+ // remove duplicates
+ advertisableUnderlays = slices.CompactFunc(advertisableUnderlays, func(a, b ma.Multiaddr) bool {
+ return a.Equal(b)
+ })
+
+ advertisableUnderlays = p2p.FilterBee260CompatibleUnderlays(o.bee260compatibility, advertisableUnderlays)
+
+ bzzAddress, err := bzz.NewAddress(s.signer, advertisableUnderlays, s.overlay, s.networkID, s.nonce)
if err != nil {
return nil, err
}
@@ -189,7 +232,7 @@ func (s *Service) Handshake(ctx context.Context, stream p2p.Stream, peerMultiadd
welcomeMessage := s.GetWelcomeMessage()
msg := &pb.Ack{
Address: &pb.BzzAddress{
- Underlay: advertisableUnderlayBytes,
+ Underlay: bzz.SerializeUnderlays(bzzAddress.Underlays),
Overlay: bzzAddress.Overlay.Bytes(),
Signature: bzzAddress.Signature,
},
@@ -215,22 +258,18 @@ func (s *Service) Handshake(ctx context.Context, stream p2p.Stream, peerMultiadd
}
// Handle handles an incoming handshake from a peer.
-func (s *Service) Handle(ctx context.Context, stream p2p.Stream, remoteMultiaddr ma.Multiaddr, remotePeerID libp2ppeer.ID) (i *Info, err error) {
+func (s *Service) Handle(ctx context.Context, stream p2p.Stream, peerMultiaddrs []ma.Multiaddr, opts ...func(*Option)) (i *Info, err error) {
loggerV1 := s.logger.V(1).Register()
+ o := new(Option)
+ for _, set := range opts {
+ set(o)
+ }
+
ctx, cancel := context.WithTimeout(ctx, handshakeTimeout)
defer cancel()
w, r := protobuf.NewWriterAndReader(stream)
- fullRemoteMA, err := buildFullMA(remoteMultiaddr, remotePeerID)
- if err != nil {
- return nil, err
- }
-
- fullRemoteMABytes, err := fullRemoteMA.MarshalBinary()
- if err != nil {
- return nil, err
- }
var syn pb.Syn
if err := r.ReadMsgWithContext(ctx, &syn); err != nil {
@@ -239,35 +278,56 @@ func (s *Service) Handle(ctx context.Context, stream p2p.Stream, remoteMultiaddr
}
s.metrics.SynRx.Inc()
- observedUnderlay, err := ma.NewMultiaddrBytes(syn.ObservedUnderlay)
+ observedUnderlays, err := bzz.DeserializeUnderlays(syn.ObservedUnderlay)
if err != nil {
return nil, ErrInvalidSyn
}
- advertisableUnderlay, err := s.advertisableAddresser.Resolve(observedUnderlay)
- if err != nil {
- return nil, err
+ advertisableUnderlays := make([]ma.Multiaddr, len(observedUnderlays))
+ for i, observedUnderlay := range observedUnderlays {
+ advertisableUnderlay, err := s.advertisableAddresser.Resolve(observedUnderlay)
+ if err != nil {
+ return nil, err
+ }
+ advertisableUnderlays[i] = advertisableUnderlay
}
- bzzAddress, err := bzz.NewAddress(s.signer, advertisableUnderlay, s.overlay, s.networkID, s.nonce)
- if err != nil {
- return nil, err
+ if s.hostAddresser != nil {
+ hostAddrs, err := s.hostAddresser.AdvertizableAddrs()
+ if err != nil {
+ return nil, fmt.Errorf("get host advertizable addresses: %w", err)
+ }
+
+ advertisableUnderlays = append(advertisableUnderlays, hostAddrs...)
}
- advertisableUnderlayBytes, err := bzzAddress.Underlay.MarshalBinary()
+ // sort to remove potential duplicates
+ slices.SortFunc(advertisableUnderlays, func(a, b ma.Multiaddr) int {
+ return cmp.Compare(a.String(), b.String())
+ })
+ // remove duplicates
+ advertisableUnderlays = slices.CompactFunc(advertisableUnderlays, func(a, b ma.Multiaddr) bool {
+ return a.Equal(b)
+ })
+
+ advertisableUnderlays = p2p.FilterBee260CompatibleUnderlays(o.bee260compatibility, advertisableUnderlays)
+
+ bzzAddress, err := bzz.NewAddress(s.signer, advertisableUnderlays, s.overlay, s.networkID, s.nonce)
if err != nil {
return nil, err
}
welcomeMessage := s.GetWelcomeMessage()
+ peerMultiaddrs = p2p.FilterBee260CompatibleUnderlays(o.bee260compatibility, peerMultiaddrs)
+
if err := w.WriteMsgWithContext(ctx, &pb.SynAck{
Syn: &pb.Syn{
- ObservedUnderlay: fullRemoteMABytes,
+ ObservedUnderlay: bzz.SerializeUnderlays(peerMultiaddrs),
},
Ack: &pb.Ack{
Address: &pb.BzzAddress{
- Underlay: advertisableUnderlayBytes,
+ Underlay: bzz.SerializeUnderlays(bzzAddress.Underlays),
Overlay: bzzAddress.Overlay.Bytes(),
Signature: bzzAddress.Signature,
},
@@ -295,8 +355,12 @@ func (s *Service) Handle(ctx context.Context, stream p2p.Stream, remoteMultiaddr
overlay := swarm.NewAddress(ack.Address.Overlay)
- if s.picker != nil {
- if !s.picker.Pick(p2p.Peer{Address: overlay, FullNode: ack.FullNode}) {
+ s.mu.RLock()
+ picker := s.picker
+ s.mu.RUnlock()
+
+ if picker != nil {
+ if !picker.Pick(p2p.Peer{Address: overlay, FullNode: ack.FullNode}) {
return nil, ErrPicker
}
}
@@ -331,10 +395,6 @@ func (s *Service) GetWelcomeMessage() string {
return s.welcomeMessage.Load().(string)
}
-func buildFullMA(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) {
- return ma.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", addr.String(), peerID.String()))
-}
-
func (s *Service) parseCheckAck(ack *pb.Ack) (*bzz.Address, error) {
bzzAddress, err := bzz.ParseAddress(ack.Address.Underlay, ack.Address.Overlay, ack.Address.Signature, ack.Nonce, s.validateOverlay, s.networkID)
if err != nil {
diff --git a/pkg/p2p/libp2p/internal/handshake/handshake_test.go b/pkg/p2p/libp2p/internal/handshake/handshake_test.go
index e0afcc56d70..d455e6e8520 100644
--- a/pkg/p2p/libp2p/internal/handshake/handshake_test.go
+++ b/pkg/p2p/libp2p/internal/handshake/handshake_test.go
@@ -38,27 +38,44 @@ func TestHandshake(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- node2ma, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1634/p2p/16Uiu2HAkx8ULY8cTXhdVAcMmLcH9AsTKz6uBQ7DPLKRjMLgBVYkS")
+
+ node1ma2, err := ma.NewMultiaddr("/ip6/::1/tcp/46881/p2p/16Uiu2HAkx8ULY8cTXhdVAcMmLcH9AsTKz6uBQ7DPLKRjMLgBVYkA")
if err != nil {
t.Fatal(err)
}
- node1maBinary, err := node1ma.MarshalBinary()
+
+ node2ma, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1634/p2p/16Uiu2HAkx8ULY8cTXhdVAcMmLcH9AsTKz6uBQ7DPLKRjMLgBVYkS")
if err != nil {
t.Fatal(err)
}
- node2maBinary, err := node2ma.MarshalBinary()
+
+ node2ma2, err := ma.NewMultiaddr("/ip4/10.34.35.60/tcp/35315/p2p/16Uiu2HAkx8ULY8cTXhdVAcMmLcH9AsTKz6uBQ7DPLKRjMLgBVYkS")
if err != nil {
t.Fatal(err)
}
- node1AddrInfo, err := libp2ppeer.AddrInfoFromP2pAddr(node1ma)
+
+ node1mas := []ma.Multiaddr{node1ma, node1ma2}
+ node2mas := []ma.Multiaddr{node2ma, node2ma2}
+
+ node1maBinary := bzz.SerializeUnderlays(node1mas)
+ node2maBinary := bzz.SerializeUnderlays(node2mas)
+
+ node1AddrInfos, err := libp2ppeer.AddrInfosFromP2pAddrs(node1ma, node1ma2)
if err != nil {
t.Fatal(err)
}
- node2AddrInfo, err := libp2ppeer.AddrInfoFromP2pAddr(node2ma)
+ if len(node1AddrInfos) != 1 {
+ t.Fatal("must be same peer")
+ }
+ node1AddrInfo := node1AddrInfos[0]
+
+ node2AddrInfos, err := libp2ppeer.AddrInfosFromP2pAddrs(node2ma, node2ma2)
if err != nil {
t.Fatal(err)
}
-
+ if len(node2AddrInfos) != 1 {
+ t.Fatal("must be same peer")
+ }
privateKey1, err := crypto.GenerateSecp256k1Key()
if err != nil {
t.Fatal(err)
@@ -76,7 +93,7 @@ func TestHandshake(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- node1BzzAddress, err := bzz.NewAddress(signer1, node1ma, addr, networkID, nonce)
+ node1BzzAddress, err := bzz.NewAddress(signer1, []ma.Multiaddr{node1ma, node1ma2}, addr, networkID, nonce)
if err != nil {
t.Fatal(err)
}
@@ -84,7 +101,7 @@ func TestHandshake(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- node2BzzAddress, err := bzz.NewAddress(signer2, node2ma, addr2, networkID, nonce)
+ node2BzzAddress, err := bzz.NewAddress(signer2, []ma.Multiaddr{node2ma, node2ma2}, addr2, networkID, nonce)
if err != nil {
t.Fatal(err)
}
@@ -100,7 +117,7 @@ func TestHandshake(t *testing.T) {
aaddresser := &AdvertisableAddresserMock{}
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nonce, testWelcomeMessage, true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nonce, nil, testWelcomeMessage, true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
@@ -134,7 +151,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- res, err := handshakeService.Handshake(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handshake(context.Background(), stream1, node2mas)
if err != nil {
t.Fatal(err)
}
@@ -177,7 +194,7 @@ func TestHandshake(t *testing.T) {
})
t.Run("Handshake - picker error", func(t *testing.T) {
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nonce, "", true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nonce, nil, "", true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
@@ -209,7 +226,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- _, err = handshakeService.Handle(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ _, err = handshakeService.Handle(context.Background(), stream1, node2mas)
expectedErr := handshake.ErrPicker
if !errors.Is(err, expectedErr) {
t.Fatal("expected:", expectedErr, "got:", err)
@@ -220,7 +237,7 @@ func TestHandshake(t *testing.T) {
const LongMessage = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi consectetur urna ut lorem sollicitudin posuere. Donec sagittis laoreet sapien."
expectedErr := handshake.ErrWelcomeMessageLength
- _, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, LongMessage, true, node1AddrInfo.ID, logger)
+ _, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, nil, LongMessage, true, node1AddrInfo.ID, logger)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
@@ -254,7 +271,7 @@ func TestHandshake(t *testing.T) {
expectedErr := fmt.Errorf("write syn message: %w", testErr)
stream := &mock.Stream{}
stream.SetWriteErr(testErr, 0)
- res, err := handshakeService.Handshake(context.Background(), stream, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handshake(context.Background(), stream, node2mas)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
@@ -269,7 +286,7 @@ func TestHandshake(t *testing.T) {
expectedErr := fmt.Errorf("read synack message: %w", testErr)
stream := mock.NewStream(nil, &bytes.Buffer{})
stream.SetReadErr(testErr, 0)
- res, err := handshakeService.Handshake(context.Background(), stream, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handshake(context.Background(), stream, node2mas)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
@@ -308,7 +325,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- res, err := handshakeService.Handshake(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handshake(context.Background(), stream1, node2mas)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
@@ -342,7 +359,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- res, err := handshakeService.Handshake(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handshake(context.Background(), stream1, node2mas)
if res != nil {
t.Fatal("res should be nil")
}
@@ -375,11 +392,11 @@ func TestHandshake(t *testing.T) {
}); err != nil {
t.Fatal(err)
}
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nonce, testWelcomeMessage, true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nonce, nil, testWelcomeMessage, true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
- res, err := handshakeService.Handshake(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handshake(context.Background(), stream1, node2mas)
if res != nil {
t.Fatal("res should be nil")
}
@@ -419,7 +436,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- res, err := handshakeService.Handshake(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handshake(context.Background(), stream1, node2mas)
if !errors.Is(err, testError) {
t.Fatalf("expected error %v got %v", testError, err)
@@ -432,7 +449,7 @@ func TestHandshake(t *testing.T) {
})
t.Run("Handle - OK", func(t *testing.T) {
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nonce, "", true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nonce, nil, "", true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
@@ -461,7 +478,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- res, err := handshakeService.Handle(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handle(context.Background(), stream1, node2mas)
if err != nil {
t.Fatal(err)
}
@@ -490,7 +507,7 @@ func TestHandshake(t *testing.T) {
})
t.Run("Handle - read error ", func(t *testing.T) {
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, "", true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, nil, "", true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
@@ -498,7 +515,7 @@ func TestHandshake(t *testing.T) {
expectedErr := fmt.Errorf("read syn message: %w", testErr)
stream := &mock.Stream{}
stream.SetReadErr(testErr, 0)
- res, err := handshakeService.Handle(context.Background(), stream, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handle(context.Background(), stream, node2mas)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
@@ -509,7 +526,7 @@ func TestHandshake(t *testing.T) {
})
t.Run("Handle - write error ", func(t *testing.T) {
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, "", true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, nil, "", true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
@@ -525,7 +542,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- res, err := handshakeService.Handle(context.Background(), stream, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handle(context.Background(), stream, node2mas)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
@@ -536,7 +553,7 @@ func TestHandshake(t *testing.T) {
})
t.Run("Handle - ack read error ", func(t *testing.T) {
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, "", true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, nil, "", true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
@@ -554,7 +571,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- res, err := handshakeService.Handle(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handle(context.Background(), stream1, node2mas)
if err == nil || err.Error() != expectedErr.Error() {
t.Fatal("expected:", expectedErr, "got:", err)
}
@@ -565,7 +582,7 @@ func TestHandshake(t *testing.T) {
})
t.Run("Handle - networkID mismatch ", func(t *testing.T) {
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, "", true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, nil, "", true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
@@ -593,7 +610,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- res, err := handshakeService.Handle(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handle(context.Background(), stream1, node2mas)
if res != nil {
t.Fatal("res should be nil")
}
@@ -604,7 +621,7 @@ func TestHandshake(t *testing.T) {
})
t.Run("Handle - invalid ack", func(t *testing.T) {
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, "", true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, nil, "", true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
@@ -632,14 +649,14 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- _, err = handshakeService.Handle(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ _, err = handshakeService.Handle(context.Background(), stream1, node2mas)
if !errors.Is(err, handshake.ErrInvalidAck) {
t.Fatalf("expected %s, got %v", handshake.ErrInvalidAck, err)
}
})
t.Run("Handle - advertisable error", func(t *testing.T) {
- handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, "", true, node1AddrInfo.ID, logger)
+ handshakeService, err := handshake.New(signer1, aaddresser, node1Info.BzzAddress.Overlay, networkID, true, nil, nil, "", true, node1AddrInfo.ID, logger)
if err != nil {
t.Fatal(err)
}
@@ -661,7 +678,7 @@ func TestHandshake(t *testing.T) {
t.Fatal(err)
}
- res, err := handshakeService.Handle(context.Background(), stream1, node2AddrInfo.Addrs[0], node2AddrInfo.ID)
+ res, err := handshakeService.Handle(context.Background(), stream1, node2mas)
if !errors.Is(err, testError) {
t.Fatal("expected error")
}
diff --git a/pkg/p2p/libp2p/internal/reacher/heap.go b/pkg/p2p/libp2p/internal/reacher/heap.go
new file mode 100644
index 00000000000..10277b3df47
--- /dev/null
+++ b/pkg/p2p/libp2p/internal/reacher/heap.go
@@ -0,0 +1,33 @@
+// Copyright 2026 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reacher
+
+// peerHeap is a min-heap of peers ordered by retryAfter time.
+type peerHeap []*peer
+
+func (h peerHeap) Len() int { return len(h) }
+func (h peerHeap) Less(i, j int) bool { return h[i].retryAfter.Before(h[j].retryAfter) }
+func (h peerHeap) Swap(i, j int) {
+ h[i], h[j] = h[j], h[i]
+ h[i].index = i
+ h[j].index = j
+}
+
+func (h *peerHeap) Push(x any) {
+ n := len(*h)
+ p := x.(*peer)
+ p.index = n
+ *h = append(*h, p)
+}
+
+func (h *peerHeap) Pop() any {
+ old := *h
+ n := len(old)
+ p := old[n-1]
+ old[n-1] = nil // avoid memory leak
+ p.index = -1 // for safety
+ *h = old[0 : n-1]
+ return p
+}
diff --git a/pkg/p2p/libp2p/internal/reacher/metrics.go b/pkg/p2p/libp2p/internal/reacher/metrics.go
index a6019ba4a4e..a288f2fa8d5 100644
--- a/pkg/p2p/libp2p/internal/reacher/metrics.go
+++ b/pkg/p2p/libp2p/internal/reacher/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Swarm Authors. All rights reserved.
+// Copyright 2026 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -9,30 +9,48 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
+// metrics groups reacher related prometheus counters.
type metrics struct {
- Pings *prometheus.CounterVec
- PingTime *prometheus.HistogramVec
+ Peers prometheus.Gauge
+ PingAttemptCount prometheus.Counter
+ PingErrorCount prometheus.Counter
+ PingDuration prometheus.Histogram
}
+// newMetrics is a convenient constructor for creating new metrics.
func newMetrics() metrics {
- subsystem := "reacher"
+ const subsystem = "reacher"
return metrics{
- Pings: prometheus.NewCounterVec(prometheus.CounterOpts{
+ Peers: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
- Name: "pings",
- Help: "Ping counter.",
- }, []string{"status"}),
- PingTime: prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Name: "peers",
+ Help: "Number of peers currently in the reacher queue.",
+ }),
+ PingAttemptCount: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
- Name: "ping_timer",
- Help: "Ping timer.",
- }, []string{"status"}),
+ Name: "ping_attempt_count",
+ Help: "Number of ping attempts.",
+ }),
+ PingErrorCount: prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: m.Namespace,
+ Subsystem: subsystem,
+ Name: "ping_error_count",
+ Help: "Number of failed ping attempts.",
+ }),
+ PingDuration: prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: m.Namespace,
+ Subsystem: subsystem,
+ Name: "ping_duration_seconds",
+ Help: "Ping latency distribution in seconds.",
+ Buckets: []float64{.1, .25, .5, 1, 2, 5, 10, 15},
+ }),
}
}
-func (s *reacher) Metrics() []prometheus.Collector {
- return m.PrometheusCollectorsFromFields(s.metrics)
+// Metrics returns set of prometheus collectors.
+func (r *reacher) Metrics() []prometheus.Collector {
+ return m.PrometheusCollectorsFromFields(r.metrics)
}
diff --git a/pkg/p2p/libp2p/internal/reacher/reacher.go b/pkg/p2p/libp2p/internal/reacher/reacher.go
index 18b1431229f..966b5978411 100644
--- a/pkg/p2p/libp2p/internal/reacher/reacher.go
+++ b/pkg/p2p/libp2p/internal/reacher/reacher.go
@@ -7,30 +7,41 @@
package reacher
import (
+ "container/heap"
"context"
+ "math/rand/v2"
"sync"
"time"
+ "github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/p2p"
"github.com/ethersphere/bee/v2/pkg/swarm"
ma "github.com/multiformats/go-multiaddr"
)
const (
- pingTimeout = time.Second * 15
- workers = 16
- retryAfterDuration = time.Minute * 5
+ pingTimeout = time.Second * 15
+ workers = 8
+ retryAfterDuration = time.Minute * 5
+ maxFailBackoffExponent = 4 // caps failure backoff at retryAfterDuration * 2^4 = 80 min
+ maxSuccessBackoffExponent = 2 // caps success backoff at retryAfterDuration * 2^2 = 20 min
+ jitterFactor = 0.2 // ±20% randomization on retry intervals
)
type peer struct {
- overlay swarm.Address
- addr ma.Multiaddr
- retryAfter time.Time
+ overlay swarm.Address
+ addr ma.Multiaddr
+ retryAfter time.Time
+ failCount int // consecutive ping failures for exponential backoff
+ successCount int // consecutive ping successes for exponential backoff
+ generation int // incremented on reconnect; guards against stale notifyResult
+ index int // index in the heap
}
type reacher struct {
- mu sync.Mutex
- peers map[string]*peer
+ mu sync.Mutex
+ peerHeap peerHeap // min-heap ordered by retryAfter
+ peerIndex map[string]*peer // lookup by overlay for O(1) access
newPeer chan struct{}
quit chan struct{}
@@ -38,27 +49,30 @@ type reacher struct {
pinger p2p.Pinger
notifier p2p.ReachableNotifier
- wg sync.WaitGroup
- metrics metrics
+ wg sync.WaitGroup
+ metrics metrics
options *Options
+ logger log.Logger
}
type Options struct {
PingTimeout time.Duration
Workers int
RetryAfterDuration time.Duration
+ JitterFactor float64 // ±N% randomization on retry intervals; 0 disables jitter
}
-func New(streamer p2p.Pinger, notifier p2p.ReachableNotifier, o *Options) *reacher {
-
+func New(streamer p2p.Pinger, notifier p2p.ReachableNotifier, o *Options, log log.Logger) *reacher {
r := &reacher{
- newPeer: make(chan struct{}, 1),
- quit: make(chan struct{}),
- pinger: streamer,
- peers: make(map[string]*peer),
- notifier: notifier,
- metrics: newMetrics(),
+ newPeer: make(chan struct{}, 1),
+ quit: make(chan struct{}),
+ pinger: streamer,
+ peerHeap: make(peerHeap, 0),
+ peerIndex: make(map[string]*peer),
+ notifier: notifier,
+ metrics: newMetrics(),
+ logger: log.WithName("reacher").Register(),
}
if o == nil {
@@ -66,6 +80,7 @@ func New(streamer p2p.Pinger, notifier p2p.ReachableNotifier, o *Options) *reach
PingTimeout: pingTimeout,
Workers: workers,
RetryAfterDuration: retryAfterDuration,
+ JitterFactor: jitterFactor,
}
}
r.options = o
@@ -77,10 +92,9 @@ func New(streamer p2p.Pinger, notifier p2p.ReachableNotifier, o *Options) *reach
}
func (r *reacher) manage() {
-
defer r.wg.Done()
- c := make(chan *peer)
+ c := make(chan peer)
defer close(c)
ctx, cancel := context.WithCancel(context.Background())
@@ -92,8 +106,7 @@ func (r *reacher) manage() {
}
for {
-
- p, tryAfter := r.tryAcquirePeer()
+ p, ok, tryAfter := r.tryAcquirePeer()
// if no peer is returned,
// wait until either more work or the closest retry-after time.
@@ -111,7 +124,7 @@ func (r *reacher) manage() {
}
// wait for work
- if p == nil {
+ if !ok {
select {
case <-r.quit:
return
@@ -129,71 +142,124 @@ func (r *reacher) manage() {
}
}
-func (r *reacher) ping(c chan *peer, ctx context.Context) {
-
+func (r *reacher) ping(c chan peer, ctx context.Context) {
defer r.wg.Done()
-
for p := range c {
-
- now := time.Now()
-
- ctxt, cancel := context.WithTimeout(ctx, r.options.PingTimeout)
- _, err := r.pinger.Ping(ctxt, p.addr)
- cancel()
-
- // ping was successful
- if err == nil {
- r.metrics.Pings.WithLabelValues("success").Inc()
- r.metrics.PingTime.WithLabelValues("success").Observe(time.Since(now).Seconds())
- r.notifier.Reachable(p.overlay, p2p.ReachabilityStatusPublic)
- } else {
- r.metrics.Pings.WithLabelValues("failure").Inc()
- r.metrics.PingTime.WithLabelValues("failure").Observe(time.Since(now).Seconds())
- r.notifier.Reachable(p.overlay, p2p.ReachabilityStatusPrivate)
- }
+ func() {
+ r.metrics.PingAttemptCount.Inc()
+ ctxt, cancel := context.WithTimeout(ctx, r.options.PingTimeout)
+ defer cancel()
+ start := time.Now()
+ rtt, err := r.pinger.Ping(ctxt, p.addr)
+ if err != nil {
+ r.metrics.PingDuration.Observe(time.Since(start).Seconds())
+ r.metrics.PingErrorCount.Inc()
+ r.logger.Debug("ping failed", "peer", p.overlay.String(), "addr", p.addr.String(), "error", err)
+ r.notifier.Reachable(p.overlay, p2p.ReachabilityStatusPrivate)
+ r.notifyResult(p.overlay, false, p.generation)
+ } else {
+ r.metrics.PingDuration.Observe(rtt.Seconds())
+ r.logger.Debug("ping succeeded", "peer", p.overlay.String(), "addr", p.addr.String(), "rtt", rtt)
+ r.notifier.Reachable(p.overlay, p2p.ReachabilityStatusPublic)
+ r.notifyResult(p.overlay, true, p.generation)
+ }
+ }()
}
}
-func (r *reacher) tryAcquirePeer() (*peer, time.Duration) {
+func (r *reacher) tryAcquirePeer() (peer, bool, time.Duration) {
r.mu.Lock()
defer r.mu.Unlock()
- var (
- now = time.Now()
- nextClosest time.Time
- )
+ if len(r.peerHeap) == 0 {
+ return peer{}, false, 0
+ }
- for _, p := range r.peers {
+ now := time.Now()
- // retry after has expired, retry
- if now.After(p.retryAfter) {
- p.retryAfter = time.Now().Add(r.options.RetryAfterDuration)
- return p, 0
- }
+ // Peek at the peer with the earliest retryAfter
+ p := r.peerHeap[0]
- // here, we find the peer with the earliest retry after
- if nextClosest.IsZero() || p.retryAfter.Before(nextClosest) {
- nextClosest = p.retryAfter
- }
+ // If retryAfter has not expired, return time to wait
+ if now.Before(p.retryAfter) {
+ return peer{}, false, time.Until(p.retryAfter)
}
- if nextClosest.IsZero() {
- return nil, 0
- }
+ // Set a temporary far-future retryAfter to prevent the manage loop from
+ // re-dispatching this peer while the ping is in flight. The actual
+ // retryAfter will be set by notifyResult after the ping completes.
+ p.retryAfter = now.Add(time.Hour)
+ heap.Fix(&r.peerHeap, p.index)
- // return the time to wait until the closest retry after
- return nil, time.Until(nextClosest)
+ // Return a copy so callers can read fields without holding the lock.
+ return *p, true, 0
}
// Connected adds a new peer to the queue for testing reachability.
+// If the peer already exists, its address is updated.
func (r *reacher) Connected(overlay swarm.Address, addr ma.Multiaddr) {
+ if addr == nil {
+ return
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ key := overlay.ByteString()
+ if existing, ok := r.peerIndex[key]; ok {
+ existing.addr = addr // Update address for reconnecting peer
+ existing.retryAfter = time.Time{} // Reset to trigger immediate re-ping
+ existing.failCount = 0 // Fresh start on reconnect
+ existing.successCount = 0 // Fresh start on reconnect
+ existing.generation++ // invalidate any in-flight notifyResult
+ heap.Fix(&r.peerHeap, existing.index)
+ } else {
+ p := &peer{overlay: overlay, addr: addr}
+ r.peerIndex[key] = p
+ heap.Push(&r.peerHeap, p)
+ r.metrics.Peers.Inc()
+ }
+
+ select {
+ case r.newPeer <- struct{}{}:
+ default:
+ }
+}
+
+// notifyResult updates the peer's retry schedule based on the ping outcome.
+// Both success and failure use exponential backoff with different caps:
+// - Success: 5m → 10m → 20m (capped at 2^2), resets failCount
+// - Failure: 5m → 10m → 20m → 40m → 80m (capped at 2^4), resets successCount
+//
+// The gen parameter is the generation captured when the ping was dispatched.
+// If the peer was reconnected (generation incremented) while the ping was
+// in flight, the stale result is discarded.
+func (r *reacher) notifyResult(overlay swarm.Address, success bool, gen int) {
r.mu.Lock()
defer r.mu.Unlock()
- if _, ok := r.peers[overlay.ByteString()]; !ok {
- r.peers[overlay.ByteString()] = &peer{overlay: overlay, addr: addr}
+ p, ok := r.peerIndex[overlay.ByteString()]
+ if !ok {
+ return // peer was disconnected while ping was in flight
+ }
+ if p.generation != gen {
+ return // peer was reconnected; discard stale result
+ }
+
+ if success {
+ p.failCount = 0
+ p.successCount++
+ backoff := min(p.successCount, maxSuccessBackoffExponent)
+ p.retryAfter = time.Now().Add(r.jitter(r.options.RetryAfterDuration * time.Duration(1< 1 {
+ t.Fatalf("overlay should be disconnected already")
+ }
+ return 0, errors.New("test error")
+ }
+ return 0, nil
+ }
+
+ reachableFunc := func(addr swarm.Address, b p2p.ReachabilityStatus) {}
+
+ mock := newMock(pingFunc, reachableFunc)
+
+ r := reacher.New(mock, mock, &defaultOptions, log.Noop)
+ testutil.CleanupCloser(t, r)
+
+ addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/7072/p2p/16Uiu2HAmTBuJT9LvNmBiQiNoTsxE5mtNy6YG3paw79m94CRa9sRb")
+ r.Connected(swarm.RandAddress(t), addr)
+ r.Connected(disconnectedOverlay, disconnectedMa)
+ r.Disconnected(disconnectedOverlay)
+ })
+}
+
+func TestAddressUpdateOnReconnect(t *testing.T) {
+ t.Parallel()
+
+ synctest.Test(t, func(t *testing.T) {
+ // Use 1 worker and no jitter to make timing deterministic.
+ options := reacher.Options{
+ PingTimeout: time.Second * 5,
+ Workers: 1,
+ RetryAfterDuration: time.Minute,
+ JitterFactor: 0,
+ }
+
+ overlay := swarm.RandAddress(t)
+ oldAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/7071/p2p/16Uiu2HAmTBuJT9LvNmBiQiNoTsxE5mtNy6YG3paw79m94CRa9sRb")
+ newAddr, _ := ma.NewMultiaddr("/ip4/192.168.1.1/tcp/7072/p2p/16Uiu2HAmTBuJT9LvNmBiQiNoTsxE5mtNy6YG3paw79m94CRa9sRb")
+
+ var pingsMu sync.Mutex
+ var pings []ma.Multiaddr
+ pinged := make(chan struct{}, 8)
+
+ pingFunc := func(_ context.Context, a ma.Multiaddr) (time.Duration, error) {
+ pingsMu.Lock()
+ pings = append(pings, a)
+ pingsMu.Unlock()
+ pinged <- struct{}{}
+ return 0, nil
+ }
+
+ reachableFunc := func(addr swarm.Address, status p2p.ReachabilityStatus) {}
+
+ mock := newMock(pingFunc, reachableFunc)
+
+ r := reacher.New(mock, mock, &options, log.Noop)
+ testutil.CleanupCloser(t, r)
+
+ // First connection with old address – triggers initial ping.
+ r.Connected(overlay, oldAddr)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for initial ping")
+ case <-pinged:
+ }
+
+ // Verify old address was pinged first.
+ pingsMu.Lock()
+ if len(pings) != 1 {
+ t.Fatalf("expected 1 ping after initial connect, got %d", len(pings))
+ }
+ if !pings[0].Equal(oldAddr) {
+ t.Fatalf("first ping should use old address, got %s", pings[0])
+ }
+ pingsMu.Unlock()
+
+ // Reconnect with a new address — should trigger immediate re-ping.
+ r.Connected(overlay, newAddr)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for reconnect ping")
+ case <-pinged:
+ }
+
+ // Verify the reconnect pinged the new address.
+ pingsMu.Lock()
+ if len(pings) != 2 {
+ t.Fatalf("expected 2 pings after reconnect, got %d", len(pings))
+ }
+ if !pings[1].Equal(newAddr) {
+ t.Fatalf("reconnect ping should use new address, got %s", pings[1])
+ }
+ pingsMu.Unlock()
+
+ // After reconnect success, successCount=1 so backoff = 2min (no jitter).
+ // Sleep past 2min to trigger the scheduled re-ping.
+ time.Sleep(3 * time.Minute)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for scheduled re-ping")
+ case <-pinged:
+ }
+
+ // Verify the scheduled re-ping used the new address.
+ pingsMu.Lock()
+ if len(pings) != 3 {
+ t.Fatalf("expected 3 pings after retry duration, got %d", len(pings))
+ }
+ if !pings[2].Equal(newAddr) {
+ t.Fatalf("scheduled re-ping should use new address, got %s", pings[2])
+ }
+ pingsMu.Unlock()
+ })
+}
+
+func TestHeapOrdering(t *testing.T) {
t.Parallel()
- var (
- disconnectedOverlay = swarm.RandAddress(t)
- disconnectedMa, _ = ma.NewMultiaddr("/ip4/127.0.0.1/tcp/7071/p2p/16Uiu2HAmTBuJT9LvNmBiQiNoTsxE5mtNy6YG3paw79m94CRa9sRb")
- )
-
- /*
- Because the Disconnected is called after Connected, it may be that one of the workers
- have picked up the peer already. So to test that the Disconnected really works,
- if the ping function pings the peer we are trying to disconnect, we return an error
- which triggers another attempt in the future, which by the, the peer should already be removed.
- */
- var errs atomic.Int64
- pingFunc := func(_ context.Context, a ma.Multiaddr) (time.Duration, error) {
- if a != nil && a.Equal(disconnectedMa) {
- errs.Inc()
- if errs.Load() > 1 {
- t.Fatalf("overlay should be disconnected already")
+ synctest.Test(t, func(t *testing.T) {
+ // Use single worker to ensure sequential processing
+ options := reacher.Options{
+ PingTimeout: time.Second * 5,
+ Workers: 1,
+ RetryAfterDuration: time.Second * 10,
+ }
+
+ var pingOrder []swarm.Address
+ var pingOrderMu sync.Mutex
+ allPinged := make(chan struct{})
+
+ overlay1 := swarm.RandAddress(t)
+ overlay2 := swarm.RandAddress(t)
+ overlay3 := swarm.RandAddress(t)
+ addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/7071/p2p/16Uiu2HAmTBuJT9LvNmBiQiNoTsxE5mtNy6YG3paw79m94CRa9sRb")
+
+ pingFunc := func(_ context.Context, _ ma.Multiaddr) (time.Duration, error) {
+ return 0, nil
+ }
+
+ reachableFunc := func(overlay swarm.Address, status p2p.ReachabilityStatus) {
+ pingOrderMu.Lock()
+ pingOrder = append(pingOrder, overlay)
+ if len(pingOrder) == 3 {
+ close(allPinged)
}
- return 0, errors.New("test error")
+ pingOrderMu.Unlock()
}
- return 0, nil
- }
- reachableFunc := func(addr swarm.Address, b p2p.ReachabilityStatus) {}
+ mock := newMock(pingFunc, reachableFunc)
+
+ r := reacher.New(mock, mock, &options, log.Noop)
+ testutil.CleanupCloser(t, r)
+
+ // Add peers - they should all be pinged since retryAfter starts at zero
+ r.Connected(overlay1, addr)
+ r.Connected(overlay2, addr)
+ r.Connected(overlay3, addr)
+
+ select {
+ case <-time.After(time.Second * 5):
+ t.Fatalf("test timed out, only %d peers pinged", len(pingOrder))
+ case <-allPinged:
+ }
+
+ // Verify all three peers were pinged
+ pingOrderMu.Lock()
+ defer pingOrderMu.Unlock()
+
+ if len(pingOrder) != 3 {
+ t.Fatalf("expected 3 peers pinged, got %d", len(pingOrder))
+ }
+
+ // Verify all overlays are present (order may vary due to heap with same retryAfter)
+ seen := make(map[string]bool)
+ for _, o := range pingOrder {
+ seen[o.String()] = true
+ }
+ if !seen[overlay1.String()] || !seen[overlay2.String()] || !seen[overlay3.String()] {
+ t.Fatalf("not all peers were pinged")
+ }
+ })
+}
+
+func TestBackoffOnFailure(t *testing.T) {
+ t.Parallel()
+
+ synctest.Test(t, func(t *testing.T) {
+ options := reacher.Options{
+ PingTimeout: time.Second * 5,
+ Workers: 1,
+ RetryAfterDuration: time.Minute, // base interval
+ JitterFactor: 0,
+ }
+
+ overlay := swarm.RandAddress(t)
+ addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/7071/p2p/16Uiu2HAmTBuJT9LvNmBiQiNoTsxE5mtNy6YG3paw79m94CRa9sRb")
+
+ var pingsMu sync.Mutex
+ var pingTimes []time.Time
+ pinged := make(chan struct{}, 16)
+
+ pingFunc := func(_ context.Context, _ ma.Multiaddr) (time.Duration, error) {
+ pingsMu.Lock()
+ pingTimes = append(pingTimes, time.Now())
+ pingsMu.Unlock()
+ pinged <- struct{}{}
+ return 0, errors.New("always fail")
+ }
+
+ reachableFunc := func(addr swarm.Address, status p2p.ReachabilityStatus) {}
+
+ mock := newMock(pingFunc, reachableFunc)
- mock := newMock(pingFunc, reachableFunc)
+ r := reacher.New(mock, mock, &options, log.Noop)
+ testutil.CleanupCloser(t, r)
- r := reacher.New(mock, mock, &defaultOptions)
- testutil.CleanupCloser(t, r)
+ r.Connected(overlay, addr)
- r.Connected(swarm.RandAddress(t), nil)
- r.Connected(disconnectedOverlay, disconnectedMa)
- r.Disconnected(disconnectedOverlay)
+ // Wait for the first ping (immediate).
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for first ping")
+ case <-pinged:
+ }
+
+ // After first failure: backoff = 2min (no jitter).
+ // Sleep 90s which is less than the 2min backoff.
+ time.Sleep(90 * time.Second)
+
+ pingsMu.Lock()
+ count := len(pingTimes)
+ pingsMu.Unlock()
+ if count != 1 {
+ t.Fatalf("expected 1 ping after 90s (backoff=2min), got %d", count)
+ }
+
+ // Sleep past the 2min backoff to trigger the second ping.
+ time.Sleep(55 * time.Second)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for second ping")
+ case <-pinged:
+ }
+
+ pingsMu.Lock()
+ count = len(pingTimes)
+ pingsMu.Unlock()
+ if count != 2 {
+ t.Fatalf("expected 2 pings after 2min backoff, got %d", count)
+ }
+
+ // After second failure: backoff = 4min (no jitter).
+ // Sleep past 4min to trigger the third ping.
+ time.Sleep(5 * time.Minute)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for third ping")
+ case <-pinged:
+ }
+
+ pingsMu.Lock()
+ count = len(pingTimes)
+ pingsMu.Unlock()
+ if count != 3 {
+ t.Fatalf("expected 3 pings after 4min backoff, got %d", count)
+ }
+ })
+}
+
+func TestBackoffOnSuccess(t *testing.T) {
+ t.Parallel()
+
+ synctest.Test(t, func(t *testing.T) {
+ options := reacher.Options{
+ PingTimeout: time.Second * 5,
+ Workers: 1,
+ RetryAfterDuration: time.Minute,
+ JitterFactor: 0,
+ }
+
+ overlay := swarm.RandAddress(t)
+ addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/7071/p2p/16Uiu2HAmTBuJT9LvNmBiQiNoTsxE5mtNy6YG3paw79m94CRa9sRb")
+
+ var pingsMu sync.Mutex
+ var pingCount int
+ pinged := make(chan struct{}, 16)
+
+ pingFunc := func(_ context.Context, _ ma.Multiaddr) (time.Duration, error) {
+ pingsMu.Lock()
+ pingCount++
+ pingsMu.Unlock()
+ pinged <- struct{}{}
+ return 0, nil // always succeed
+ }
+
+ reachableFunc := func(addr swarm.Address, status p2p.ReachabilityStatus) {}
+
+ mock := newMock(pingFunc, reachableFunc)
+
+ r := reacher.New(mock, mock, &options, log.Noop)
+ testutil.CleanupCloser(t, r)
+
+ r.Connected(overlay, addr)
+
+ // First ping (immediate).
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for first ping")
+ case <-pinged:
+ }
+
+ // After first success: backoff = 2min (no jitter).
+ // Sleep 90s which is less than 2min backoff.
+ time.Sleep(90 * time.Second)
+
+ pingsMu.Lock()
+ if pingCount != 1 {
+ t.Fatalf("expected 1 ping after 90s (backoff=2min), got %d", pingCount)
+ }
+ pingsMu.Unlock()
+
+ // Sleep past the 2min backoff to trigger the second ping.
+ time.Sleep(55 * time.Second)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for second ping")
+ case <-pinged:
+ }
+
+ pingsMu.Lock()
+ if pingCount != 2 {
+ t.Fatalf("expected 2 pings after 2min backoff, got %d", pingCount)
+ }
+ pingsMu.Unlock()
+
+ // After second success: successCount=2 (capped at 2), backoff = 4min (no jitter).
+ // Sleep past 4min to trigger the third ping.
+ time.Sleep(5 * time.Minute)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for third ping")
+ case <-pinged:
+ }
+
+ pingsMu.Lock()
+ if pingCount != 3 {
+ t.Fatalf("expected 3 pings after capped 4min backoff, got %d", pingCount)
+ }
+ pingsMu.Unlock()
+
+ // Third success: still capped at 4min (no jitter).
+ // Sleep 1min which is well below 4min.
+ time.Sleep(time.Minute)
+
+ pingsMu.Lock()
+ if pingCount != 3 {
+ t.Fatalf("expected still 3 pings after 1min (capped backoff=4min), got %d", pingCount)
+ }
+ pingsMu.Unlock()
+
+ // Sleep past the 4min backoff to confirm cap holds.
+ time.Sleep(4 * time.Minute)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for fourth ping")
+ case <-pinged:
+ }
+
+ pingsMu.Lock()
+ if pingCount != 4 {
+ t.Fatalf("expected 4 pings after capped backoff, got %d", pingCount)
+ }
+ pingsMu.Unlock()
+ })
+}
+
+func TestBackoffResetOnReconnect(t *testing.T) {
+ t.Parallel()
+
+ synctest.Test(t, func(t *testing.T) {
+ options := reacher.Options{
+ PingTimeout: time.Second * 5,
+ Workers: 1,
+ RetryAfterDuration: time.Minute,
+ JitterFactor: 0,
+ }
+
+ overlay := swarm.RandAddress(t)
+ addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/7071/p2p/16Uiu2HAmTBuJT9LvNmBiQiNoTsxE5mtNy6YG3paw79m94CRa9sRb")
+
+ var pingsMu sync.Mutex
+ var pingCount int
+ pinged := make(chan struct{}, 16)
+
+ pingFunc := func(_ context.Context, _ ma.Multiaddr) (time.Duration, error) {
+ pingsMu.Lock()
+ pingCount++
+ pingsMu.Unlock()
+ pinged <- struct{}{}
+ return 0, errors.New("always fail")
+ }
+
+ reachableFunc := func(addr swarm.Address, status p2p.ReachabilityStatus) {}
+
+ mock := newMock(pingFunc, reachableFunc)
+
+ r := reacher.New(mock, mock, &options, log.Noop)
+ testutil.CleanupCloser(t, r)
+
+ // First connection — immediate ping, then fail.
+ r.Connected(overlay, addr)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for first ping")
+ case <-pinged:
+ }
+
+ // After first failure, backoff = 2min.
+ // Reconnect resets failCount and triggers immediate re-ping.
+ r.Connected(overlay, addr)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for reconnect ping")
+ case <-pinged:
+ }
+
+ // After reconnect failure, backoff should be 2min again (not 4min),
+ // because failCount was reset. Sleep past 2min.
+ time.Sleep(3 * time.Minute)
+
+ select {
+ case <-time.After(time.Second * 10):
+ t.Fatal("timed out waiting for post-reconnect retry")
+ case <-pinged:
+ }
+
+ pingsMu.Lock()
+ if pingCount != 3 {
+ t.Fatalf("expected 3 pings (initial + reconnect + retry), got %d", pingCount)
+ }
+ pingsMu.Unlock()
+ })
}
type mock struct {
diff --git a/pkg/p2p/libp2p/libp2p.go b/pkg/p2p/libp2p/libp2p.go
index 4d4c3773f92..9b696cd26e2 100644
--- a/pkg/p2p/libp2p/libp2p.go
+++ b/pkg/p2p/libp2p/libp2p.go
@@ -10,29 +10,37 @@ import (
"errors"
"fmt"
"net"
+ "net/netip"
"os"
"runtime"
+ "slices"
"strconv"
"strings"
"sync"
"time"
+ ocprom "contrib.go.opencensus.io/exporter/prometheus"
+ "github.com/coreos/go-semver/semver"
"github.com/ethersphere/bee/v2"
"github.com/ethersphere/bee/v2/pkg/addressbook"
"github.com/ethersphere/bee/v2/pkg/bzz"
beecrypto "github.com/ethersphere/bee/v2/pkg/crypto"
"github.com/ethersphere/bee/v2/pkg/log"
+ m2 "github.com/ethersphere/bee/v2/pkg/metrics"
"github.com/ethersphere/bee/v2/pkg/p2p"
"github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/blocklist"
"github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/breaker"
"github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/handshake"
"github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/reacher"
+ libp2pmock "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/mock"
"github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/ethersphere/bee/v2/pkg/topology"
"github.com/ethersphere/bee/v2/pkg/topology/lightnode"
"github.com/ethersphere/bee/v2/pkg/tracing"
+ p2pforge "github.com/ipshipyard/p2p-forge/client"
"github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/config"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
@@ -48,15 +56,13 @@ import (
libp2pping "github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"
-
+ libp2prate "github.com/libp2p/go-libp2p/x/rate"
ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
"github.com/multiformats/go-multistream"
- "go.uber.org/atomic"
-
- ocprom "contrib.go.opencensus.io/exporter/prometheus"
- m2 "github.com/ethersphere/bee/v2/pkg/metrics"
- rcmgrObs "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
"github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/atomic"
+ "go.uber.org/zap"
)
// loggerName is the tree path name of the logger for this package.
@@ -75,6 +81,8 @@ const (
defaultLightNodeLimit = 100
peerUserAgentTimeout = time.Second
+ peerstoreWaitAddrsTimeout = 10 * time.Second
+
defaultHeadersRWTimeout = 10 * time.Second
IncomingStreamCountLimit = 5_000
@@ -82,33 +90,35 @@ const (
)
type Service struct {
- ctx context.Context
- host host.Host
- natManager basichost.NATManager
- natAddrResolver *staticAddressResolver
- autonatDialer host.Host
- pingDialer host.Host
- libp2pPeerstore peerstore.Peerstore
- metrics metrics
- networkID uint64
- handshakeService *handshake.Service
- addressbook addressbook.Putter
- peers *peerRegistry
- connectionBreaker breaker.Interface
- blocklist *blocklist.Blocklist
- protocols []p2p.ProtocolSpec
- notifier p2p.PickyNotifier
- logger log.Logger
- tracer *tracing.Tracer
- ready chan struct{}
- halt chan struct{}
- lightNodes lightnodes
- lightNodeLimit int
- protocolsmu sync.RWMutex
- reacher p2p.Reacher
- networkStatus atomic.Int32
- HeadersRWTimeout time.Duration
- autoNAT autonat.AutoNAT
+ ctx context.Context
+ host host.Host
+ natManager basichost.NATManager
+ autonatDialer host.Host
+ pingDialer host.Host
+ libp2pPeerstore peerstore.Peerstore
+ metrics metrics
+ networkID uint64
+ handshakeService *handshake.Service
+ addressbook addressbook.Putter
+ peers *peerRegistry
+ connectionBreaker breaker.Interface
+ blocklist *blocklist.Blocklist
+ protocols []p2p.ProtocolSpec
+ notifier p2p.PickyNotifier
+ logger log.Logger
+ tracer *tracing.Tracer
+ ready chan struct{}
+ halt chan struct{}
+ lightNodes lightnodes
+ lightNodeLimit int
+ protocolsmu sync.RWMutex
+ reacher p2p.Reacher
+ networkStatus atomic.Int32
+ HeadersRWTimeout time.Duration
+ autoNAT autonat.AutoNAT
+ autoTLSCertManager autoTLSCertManager
+ zapLogger *zap.Logger
+ enabledTransports map[bzz.TransportType]bool
}
type lightnodes interface {
@@ -120,51 +130,63 @@ type lightnodes interface {
}
type Options struct {
- PrivateKey *ecdsa.PrivateKey
- NATAddr string
- EnableWS bool
- FullNode bool
- LightNodeLimit int
- WelcomeMessage string
- Nonce []byte
- ValidateOverlay bool
- hostFactory func(...libp2p.Option) (host.Host, error)
- HeadersRWTimeout time.Duration
- Registry *prometheus.Registry
+ PrivateKey *ecdsa.PrivateKey
+ NATAddr string
+ NATWSSAddr string
+ EnableWS bool
+ EnableWSS bool
+ WSSAddr string
+ AutoTLSStorageDir string
+ AutoTLSCAEndpoint string
+ AutoTLSDomain string
+ AutoTLSRegistrationEndpoint string
+ FullNode bool
+ LightNodeLimit int
+ WelcomeMessage string
+ Nonce []byte
+ ValidateOverlay bool
+ hostFactory func(...libp2p.Option) (host.Host, error)
+ HeadersRWTimeout time.Duration
+ Registry *prometheus.Registry
+ autoTLSCertManager autoTLSCertManager
}
-func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay swarm.Address, addr string, ab addressbook.Putter, storer storage.StateStorer, lightNodes *lightnode.Container, logger log.Logger, tracer *tracing.Tracer, o Options) (*Service, error) {
- host, port, err := net.SplitHostPort(addr)
+func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay swarm.Address, addr string, ab addressbook.Putter, storer storage.StateStorer, lightNodes *lightnode.Container, logger log.Logger, tracer *tracing.Tracer, o Options) (s *Service, returnErr error) {
+ logger = logger.WithName(loggerName).Register()
+
+ parsedAddr, err := parseAddress(addr)
if err != nil {
- return nil, fmt.Errorf("address: %w", err)
+ return nil, err
}
- ip4Addr := "0.0.0.0"
- ip6Addr := "::"
+ var listenAddrs []string
- if host != "" {
- ip := net.ParseIP(host)
- if ip4 := ip.To4(); ip4 != nil {
- ip4Addr = ip4.String()
- ip6Addr = ""
- } else if ip6 := ip.To16(); ip6 != nil {
- ip6Addr = ip6.String()
- ip4Addr = ""
+ if parsedAddr.IP4 != "" {
+ listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s", parsedAddr.IP4, parsedAddr.Port))
+ if o.EnableWS {
+ listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s/ws", parsedAddr.IP4, parsedAddr.Port))
}
}
- var listenAddrs []string
- if ip4Addr != "" {
- listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s", ip4Addr, port))
+ if parsedAddr.IP6 != "" {
+ listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s", parsedAddr.IP6, parsedAddr.Port))
if o.EnableWS {
- listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s/ws", ip4Addr, port))
+ listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s/ws", parsedAddr.IP6, parsedAddr.Port))
}
}
- if ip6Addr != "" {
- listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s", ip6Addr, port))
- if o.EnableWS {
- listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s/ws", ip6Addr, port))
+ if o.EnableWSS {
+ parsedWssAddr, err := parseAddress(o.WSSAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ if parsedWssAddr.IP4 != "" {
+ listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s/tls/sni/*.%s/ws", parsedWssAddr.IP4, parsedWssAddr.Port, o.AutoTLSDomain))
+ }
+
+ if parsedWssAddr.IP6 != "" {
+ listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s/tls/sni/*.%s/ws", parsedWssAddr.IP6, parsedWssAddr.Port, o.AutoTLSDomain))
}
}
@@ -175,7 +197,7 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
}
if o.Registry != nil {
- rcmgrObs.MustRegisterWith(o.Registry)
+ rcmgr.MustRegisterWith(o.Registry)
}
_, err = ocprom.NewExporter(ocprom.Options{
@@ -201,18 +223,93 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
// The resource manager expects a limiter, se we create one from our limits.
limiter := rcmgr.NewFixedLimiter(limits)
- str, err := rcmgrObs.NewStatsTraceReporter()
+ str, err := rcmgr.NewStatsTraceReporter()
if err != nil {
return nil, err
}
- rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithTraceReporter(str))
+ limitPerIp := rcmgr.WithLimitPerSubnet(
+ []rcmgr.ConnLimitPerSubnet{{PrefixLength: 32, ConnCount: 200}}, // IPv4 /32 (Single IP) -> 200 conns
+ []rcmgr.ConnLimitPerSubnet{{PrefixLength: 56, ConnCount: 200}}, // IPv6 /56 subnet -> 200 conns
+ )
+
+ // Custom rate limiter for connection attempts
+ // 20 peers cluster adaptation:
+ // Allow bursts of connection attempts (e.g. restart) but prevent DDOS.
+ connLimiter := &libp2prate.Limiter{
+ // Allow unlimited local connections (same as default)
+ NetworkPrefixLimits: []libp2prate.PrefixLimit{
+ {Prefix: netip.MustParsePrefix("127.0.0.0/8"), Limit: libp2prate.Limit{}},
+ {Prefix: netip.MustParsePrefix("::1/128"), Limit: libp2prate.Limit{}},
+ },
+ GlobalLimit: libp2prate.Limit{}, // Unlimited global
+ SubnetRateLimiter: libp2prate.SubnetLimiter{
+ IPv4SubnetLimits: []libp2prate.SubnetLimit{
+ {
+ PrefixLength: 32, // Apply limits per individual IPv4 address (/32)
+ // Allow 10 connection attempts per second per IP, burst up to 40
+ Limit: libp2prate.Limit{RPS: 10.0, Burst: 40},
+ },
+ },
+ IPv6SubnetLimits: []libp2prate.SubnetLimit{
+ {
+ PrefixLength: 56, // Apply limits per /56 IPv6 subnet
+ // Allow 10 connection attempts per second per IP, burst up to 40
+ // Subnet-level limiting prevents flooding from multiple addresses in the same block.
+ Limit: libp2prate.Limit{RPS: 10.0, Burst: 40},
+ },
+ },
+ // Duration to retain state for an IP or subnet after it becomes inactive.
+ GracePeriod: 10 * time.Second,
+ },
+ }
+
+ rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithTraceReporter(str), limitPerIp, rcmgr.WithConnRateLimiters(connLimiter))
if err != nil {
return nil, err
}
var natManager basichost.NATManager
+ var certManager autoTLSCertManager
+ var zapLogger *zap.Logger
+
+ // AutoTLS is only needed for WSS
+ enableAutoTLS := o.EnableWSS
+
+ if enableAutoTLS {
+ if o.autoTLSCertManager != nil {
+ certManager = o.autoTLSCertManager
+ } else {
+ forgeMgr, err := newP2PForgeCertManager(logger, P2PForgeOptions{
+ Domain: o.AutoTLSDomain,
+ RegistrationEndpoint: o.AutoTLSRegistrationEndpoint,
+ CAEndpoint: o.AutoTLSCAEndpoint,
+ StorageDir: o.AutoTLSStorageDir,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ certManager = forgeMgr.CertMgr()
+ zapLogger = forgeMgr.ZapLogger()
+ }
+
+ defer func() {
+ if returnErr != nil {
+ // call if service is not constructed
+ certManager.Stop()
+ _ = zapLogger.Sync()
+ }
+ }()
+
+ if err := certManager.Start(); err != nil {
+ return nil, fmt.Errorf("start AutoTLS certificate manager: %w", err)
+ }
+
+ logger.Info("AutoTLS certificate manager initialized")
+ }
+
opts := []libp2p.Option{
libp2p.ListenAddrStrings(listenAddrs...),
security,
@@ -222,7 +319,7 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
libp2p.ResourceManager(rm),
}
- if o.NATAddr == "" {
+ if o.NATAddr == "" && o.NATWSSAddr == "" {
opts = append(opts,
libp2p.NATManager(func(n network.Network) basichost.NATManager {
natManager = basichost.NewNATManager(n)
@@ -245,10 +342,49 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
libp2p.Transport(tcp.NewTCPTransport, tcp.DisableReuseport()),
}
- if o.EnableWS {
+ var tcpResolver handshake.AdvertisableAddressResolver
+ if o.NATAddr != "" {
+ r, err := newStaticAddressResolver(o.NATAddr, net.LookupIP)
+ if err != nil {
+ return nil, fmt.Errorf("static nat: %w", err)
+ }
+ tcpResolver = r
+ }
+
+ var wssResolver handshake.AdvertisableAddressResolver
+ if o.EnableWSS && o.NATWSSAddr != "" {
+ r, err := newStaticAddressResolver(o.NATWSSAddr, net.LookupIP)
+ if err != nil {
+ return nil, fmt.Errorf("static wss nat: %w", err)
+ }
+ wssResolver = r
+ }
+
+ if o.EnableWSS {
+ wsOpt := ws.WithTLSConfig(certManager.TLSConfig())
+ transports = append(transports, libp2p.Transport(ws.New, wsOpt))
+ } else if o.EnableWS {
transports = append(transports, libp2p.Transport(ws.New))
}
+ compositeResolver := newCompositeAddressResolver(tcpResolver, wssResolver)
+
+ var addrFactory config.AddrsFactory
+ if o.EnableWSS {
+ certManagerFactory := certManager.AddressFactory()
+ addrFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ addrs = includeNatResolvedAddresses(addrs, compositeResolver, logger)
+ addrs = certManagerFactory(addrs)
+ return addrs
+ }
+ } else {
+ addrFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ return includeNatResolvedAddresses(addrs, compositeResolver, logger)
+ }
+ }
+
+ opts = append(opts, libp2p.AddrsFactory(addrFactory))
+
opts = append(opts, transports...)
if o.hostFactory == nil {
@@ -261,6 +397,18 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
return nil, err
}
+ if enableAutoTLS {
+ switch cm := certManager.(type) {
+ case *p2pforge.P2PForgeCertMgr:
+ cm.ProvideHost(h)
+ case *libp2pmock.MockP2PForgeCertMgr:
+ if err := cm.ProvideHost(h); err != nil {
+ return nil, fmt.Errorf("failed to provide host to MockP2PForgeCertMgr: %w", err)
+ }
+ default:
+ return nil, fmt.Errorf("unknown cert manager type")
+ }
+ }
// Support same non default security and transport options as
// original host.
dialer, err := o.hostFactory(append(transports, security)...)
@@ -268,7 +416,13 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
return nil, err
}
- options := []autonat.Option{autonat.EnableService(dialer.Network())}
+ if o.HeadersRWTimeout == 0 {
+ o.HeadersRWTimeout = defaultHeadersRWTimeout
+ }
+
+ options := []autonat.Option{
+ autonat.EnableService(dialer.Network()),
+ }
val, err := strconv.ParseBool(reachabilityOverridePublic)
if err != nil {
@@ -286,25 +440,7 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
return nil, fmt.Errorf("autonat: %w", err)
}
- if o.HeadersRWTimeout == 0 {
- o.HeadersRWTimeout = defaultHeadersRWTimeout
- }
-
- var advertisableAddresser handshake.AdvertisableAddressResolver
- var natAddrResolver *staticAddressResolver
- if o.NATAddr == "" {
- advertisableAddresser = &UpnpAddressResolver{
- host: h,
- }
- } else {
- natAddrResolver, err = newStaticAddressResolver(o.NATAddr, net.LookupIP)
- if err != nil {
- return nil, fmt.Errorf("static nat: %w", err)
- }
- advertisableAddresser = natAddrResolver
- }
-
- handshakeService, err := handshake.New(signer, advertisableAddresser, overlay, networkID, o.FullNode, o.Nonce, o.WelcomeMessage, o.ValidateOverlay, h.ID(), logger)
+ handshakeService, err := handshake.New(signer, newCompositeAddressResolver(tcpResolver, wssResolver), overlay, networkID, o.FullNode, o.Nonce, newHostAddresser(h), o.WelcomeMessage, o.ValidateOverlay, h.ID(), logger)
if err != nil {
return nil, fmt.Errorf("handshake service: %w", err)
}
@@ -320,28 +456,34 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
}
peerRegistry := newPeerRegistry()
- s := &Service{
- ctx: ctx,
- host: h,
- natManager: natManager,
- natAddrResolver: natAddrResolver,
- autonatDialer: dialer,
- pingDialer: pingDialer,
- handshakeService: handshakeService,
- libp2pPeerstore: libp2pPeerstore,
- metrics: newMetrics(),
- networkID: networkID,
- peers: peerRegistry,
- addressbook: ab,
- blocklist: blocklist.NewBlocklist(storer),
- logger: logger.WithName(loggerName).Register(),
- tracer: tracer,
- connectionBreaker: breaker.NewBreaker(breaker.Options{}), // use default options
- ready: make(chan struct{}),
- halt: make(chan struct{}),
- lightNodes: lightNodes,
- HeadersRWTimeout: o.HeadersRWTimeout,
- autoNAT: autoNAT,
+ s = &Service{
+ ctx: ctx,
+ host: h,
+ natManager: natManager,
+ autonatDialer: dialer,
+ pingDialer: pingDialer,
+ handshakeService: handshakeService,
+ libp2pPeerstore: libp2pPeerstore,
+ metrics: newMetrics(),
+ networkID: networkID,
+ peers: peerRegistry,
+ addressbook: ab,
+ blocklist: blocklist.NewBlocklist(storer),
+ logger: logger,
+ tracer: tracer,
+ connectionBreaker: breaker.NewBreaker(breaker.Options{}), // use default options
+ ready: make(chan struct{}),
+ halt: make(chan struct{}),
+ lightNodes: lightNodes,
+ HeadersRWTimeout: o.HeadersRWTimeout,
+ autoNAT: autoNAT,
+ autoTLSCertManager: certManager,
+ zapLogger: zapLogger,
+ enabledTransports: map[bzz.TransportType]bool{
+ bzz.TransportTCP: true, // TCP transport is always included
+ bzz.TransportWS: o.EnableWS,
+ bzz.TransportWSS: o.EnableWSS,
+ },
}
peerRegistry.setDisconnecter(s)
@@ -367,8 +509,39 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
return s, nil
}
+type parsedAddress struct {
+ IP4 string
+ IP6 string
+ Port string
+}
+
+func parseAddress(addr string) (*parsedAddress, error) {
+ host, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, fmt.Errorf("parse address %s: %w", addr, err)
+ }
+
+ res := &parsedAddress{
+ IP4: "0.0.0.0",
+ IP6: "::",
+ Port: port,
+ }
+
+ if host != "" {
+ ip := net.ParseIP(host)
+ if ip4parsed := ip.To4(); ip4parsed != nil {
+ res.IP4 = ip4parsed.String()
+ res.IP6 = ""
+ } else if ip6parsed := ip.To16(); ip6parsed != nil {
+ res.IP6 = ip6parsed.String()
+ res.IP4 = ""
+ }
+ }
+ return res, nil
+}
+
func (s *Service) reachabilityWorker() error {
- sub, err := s.host.EventBus().Subscribe([]interface{}{new(event.EvtLocalReachabilityChanged)})
+ sub, err := s.host.EventBus().Subscribe([]any{new(event.EvtLocalReachabilityChanged)})
if err != nil {
return fmt.Errorf("failed subscribing to reachability event %w", err)
}
@@ -410,7 +583,40 @@ func (s *Service) handleIncoming(stream network.Stream) {
peerID := stream.Conn().RemotePeer()
handshakeStream := newStream(stream, s.metrics)
- i, err := s.handshakeService.Handle(s.ctx, handshakeStream, stream.Conn().RemoteMultiaddr(), peerID)
+
+ peerAddrs, err := s.peerMultiaddrs(s.ctx, peerID)
+ if err != nil {
+ s.logger.Debug("stream handler: handshake: build remote multiaddrs", "peer_id", peerID, "error", err)
+ s.logger.Error(nil, "stream handler: handshake: build remote multiaddrs", "peer_id", peerID)
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(peerID)
+ return
+ }
+
+ // For the handshake we always need an observed address (ObservedUnderlay).
+ // If the peerstore had no addresses, fall back to RemoteMultiaddr for the
+ // handshake only. This typically means the peer is behind NAT and its
+ // address is not reachable from the outside.
+ observedAddrs := peerAddrs
+ if len(observedAddrs) == 0 {
+ observedAddrs, err = buildFullMAs([]ma.Multiaddr{stream.Conn().RemoteMultiaddr()}, peerID)
+ if err != nil {
+ s.logger.Debug("stream handler: handshake: build remote multiaddrs fallback", "peer_id", peerID, "error", err)
+ s.logger.Error(nil, "stream handler: handshake: build remote multiaddrs fallback", "peer_id", peerID)
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(peerID)
+ return
+ }
+ }
+
+ bee260Compat := s.bee260BackwardCompatibility(peerID)
+
+ i, err := s.handshakeService.Handle(
+ s.ctx,
+ handshakeStream,
+ observedAddrs,
+ handshake.WithBee260Compatibility(bee260Compat),
+ )
if err != nil {
s.logger.Debug("stream handler: handshake: handle failed", "peer_id", peerID, "error", err)
s.logger.Error(nil, "stream handler: handshake: handle failed", "peer_id", peerID)
@@ -426,7 +632,7 @@ func (s *Service) handleIncoming(stream network.Stream) {
s.logger.Debug("stream handler: blocklisting: exists failed", "peer_address", overlay, "error", err)
s.logger.Error(nil, "stream handler: internal error while connecting with peer", "peer_address", overlay)
_ = handshakeStream.Reset()
- _ = s.host.Network().ClosePeer(peerID)
+ _ = stream.Conn().Close()
return
}
@@ -442,7 +648,7 @@ func (s *Service) handleIncoming(stream network.Stream) {
if err = handshakeStream.FullClose(); err != nil {
s.logger.Debug("stream handler: could not close stream", "peer_address", overlay, "error", err)
s.logger.Error(nil, "stream handler: unable to handshake with peer", "peer_address", overlay)
- _ = s.Disconnect(overlay, "unable to close handshake stream")
+ _ = stream.Conn().Close()
}
return
}
@@ -454,7 +660,8 @@ func (s *Service) handleIncoming(stream network.Stream) {
return
}
- if i.FullNode {
+ // Only persist in addressbook when we have real peerstore addresses.
+ if i.FullNode && len(peerAddrs) > 0 {
err = s.addressbook.Put(i.BzzAddress.Overlay, *i.BzzAddress)
if err != nil {
s.logger.Debug("stream handler: addressbook put error", "peer_id", peerID, "error", err)
@@ -537,8 +744,8 @@ func (s *Service) handleIncoming(stream network.Stream) {
return
}
- if s.reacher != nil {
- s.reacher.Connected(overlay, i.BzzAddress.Underlay)
+ if len(peerAddrs) > 0 {
+ s.notifyReacherConnected(overlay, peerAddrs)
}
peerUserAgent := appendSpace(s.peerUserAgent(s.ctx, peerID))
@@ -548,10 +755,50 @@ func (s *Service) handleIncoming(stream network.Stream) {
s.logger.Debug("stream handler: successfully connected to peer (inbound)", "address", i.BzzAddress.Overlay, "light", i.LightString(), "user_agent", peerUserAgent)
}
+// isTransportSupported checks if the given transport type is supported by this service.
+func (s *Service) isTransportSupported(t bzz.TransportType) bool {
+ return s.enabledTransports[t]
+}
+
+// filterSupportedAddresses filters multiaddresses to only include those
+// that are supported by the available transports (TCP, WS, WSS).
+func (s *Service) filterSupportedAddresses(addrs []ma.Multiaddr) []ma.Multiaddr {
+ if len(addrs) == 0 {
+ return addrs
+ }
+
+ filtered := make([]ma.Multiaddr, 0, len(addrs))
+ for _, addr := range addrs {
+ if s.isTransportSupported(bzz.ClassifyTransport(addr)) {
+ filtered = append(filtered, addr)
+ }
+ }
+
+ return filtered
+}
+
+func (s *Service) notifyReacherConnected(overlay swarm.Address, underlays []ma.Multiaddr) {
+ if s.reacher == nil {
+ return
+ }
+
+ filteredAddrs := s.filterSupportedAddresses(underlays)
+ if len(filteredAddrs) == 0 {
+ s.logger.Debug("no supported addresses for reacher", "overlay", overlay, "total", len(underlays))
+ return
+ }
+
+ bestAddr := bzz.SelectBestAdvertisedAddress(filteredAddrs, nil)
+
+ s.logger.Debug("selected reacher address", "overlay", overlay, "address", bestAddr, "filtered", len(filteredAddrs), "total", len(underlays))
+
+ s.reacher.Connected(overlay, bestAddr)
+}
+
func (s *Service) SetPickyNotifier(n p2p.PickyNotifier) {
s.handshakeService.SetPicker(n)
s.notifier = n
- s.reacher = reacher.New(s, n, nil)
+ s.reacher = reacher.New(s, n, nil, s.logger)
}
func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) {
@@ -645,23 +892,37 @@ func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) {
}
func (s *Service) Addresses() (addresses []ma.Multiaddr, err error) {
- for _, addr := range s.host.Addrs() {
- a, err := buildUnderlayAddress(addr, s.host.ID())
- if err != nil {
- return nil, err
- }
+ return buildFullMAs(s.host.Addrs(), s.host.ID())
+}
- addresses = append(addresses, a)
+func includeNatResolvedAddresses(addrs []ma.Multiaddr, advertisableAddresser handshake.AdvertisableAddressResolver, logger log.Logger) (addresses []ma.Multiaddr) {
+ if advertisableAddresser == nil {
+ return addrs
}
- if s.natAddrResolver != nil && len(addresses) > 0 {
- a, err := s.natAddrResolver.Resolve(addresses[0])
+
+ allAddrs := make([]ma.Multiaddr, 0, len(addrs))
+
+ for _, addr := range addrs {
+ allAddrs = append(allAddrs, addr)
+
+ resolved, err := advertisableAddresser.Resolve(addr)
if err != nil {
- return nil, err
+ logger.Warning("could not resolve address", "addr", addr, "error", err)
+ continue
+ }
+
+ if resolved.Equal(addr) {
+ continue
+ }
+
+ if !slices.ContainsFunc(allAddrs, func(a ma.Multiaddr) bool {
+ return resolved.Equal(a)
+ }) {
+ allAddrs = append(allAddrs, resolved)
}
- addresses = append(addresses, a)
}
- return addresses, nil
+ return allAddrs
}
func (s *Service) NATManager() basichost.NATManager {
@@ -698,50 +959,85 @@ func buildHostAddress(peerID libp2ppeer.ID) (ma.Multiaddr, error) {
return ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", peerID.String()))
}
-func buildUnderlayAddress(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) {
- // Build host multiaddress
- hostAddr, err := buildHostAddress(peerID)
- if err != nil {
- return nil, err
- }
-
- return addr.Encapsulate(hostAddr), nil
-}
-
-func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error) {
+func (s *Service) Connect(ctx context.Context, addrs []ma.Multiaddr) (address *bzz.Address, err error) {
loggerV1 := s.logger.V(1).Register()
defer func() {
err = s.determineCurrentNetworkStatus(err)
}()
- // Extract the peer ID from the multiaddr.
- info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
- if err != nil {
- return nil, fmt.Errorf("addr from p2p: %w", err)
+ filteredAddrs := s.filterSupportedAddresses(addrs)
+ if len(filteredAddrs) == 0 {
+ s.logger.Debug("no supported addresses to connect", "total_addrs", len(addrs))
+ return nil, p2p.ErrUnsupportedAddresses
}
- hostAddr, err := buildHostAddress(info.ID)
- if err != nil {
- return nil, fmt.Errorf("build host address: %w", err)
- }
+ var info *libp2ppeer.AddrInfo
+ var peerID libp2ppeer.ID
+ var connectErr error
+ skippedSelf := false
- remoteAddr := addr.Decapsulate(hostAddr)
+ // Try to connect to each underlay address one by one.
+ for _, addr := range filteredAddrs {
+ // Extract the peer ID from the multiaddr.
+ ai, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
+ if err != nil {
+ return nil, fmt.Errorf("addr from p2p: %w", err)
+ }
- if overlay, found := s.peers.isConnected(info.ID, remoteAddr); found {
- address = &bzz.Address{
- Overlay: overlay,
- Underlay: addr,
+ info = ai
+ peerID = ai.ID
+
+ // Check if attempting to connect to self
+ if peerID == s.host.ID() {
+ s.logger.Debug("skipping connection to self", "peer_id", peerID, "underlay", info.Addrs)
+ skippedSelf = true
+ continue
}
- return address, p2p.ErrAlreadyConnected
- }
- if err := s.connectionBreaker.Execute(func() error { return s.host.Connect(ctx, *info) }); err != nil {
- if errors.Is(err, breaker.ErrClosed) {
- s.metrics.ConnectBreakerCount.Inc()
- return nil, p2p.NewConnectionBackoffError(err, s.connectionBreaker.ClosedUntil())
+ hostAddr, err := buildHostAddress(info.ID)
+ if err != nil {
+ return nil, fmt.Errorf("build host address: %w", err)
}
- return nil, err
+
+ remoteAddr := addr.Decapsulate(hostAddr)
+
+ if overlay, found := s.peers.isConnected(info.ID, remoteAddr); found {
+ address = &bzz.Address{
+ Overlay: overlay,
+ Underlays: []ma.Multiaddr{addr},
+ }
+ return address, p2p.ErrAlreadyConnected
+ }
+
+ connectCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
+ err = s.connectionBreaker.Execute(func() error { return s.host.Connect(connectCtx, *info) })
+ cancel()
+
+ if err != nil {
+ if errors.Is(err, breaker.ErrClosed) {
+ s.metrics.ConnectBreakerCount.Inc()
+ return nil, p2p.NewConnectionBackoffError(err, s.connectionBreaker.ClosedUntil())
+ }
+ s.logger.Warning("libp2p connect", "peer_id", peerID, "underlay", info.Addrs, "error", err)
+ connectErr = err
+ continue
+ }
+
+ connectErr = nil
+ }
+
+ if connectErr != nil {
+ return nil, fmt.Errorf("libp2p connect: %w", connectErr)
+ }
+
+ // If we skipped all addresses due to self-connection, return an error
+ if skippedSelf {
+ return nil, fmt.Errorf("cannot connect to self")
+ }
+
+ if info == nil {
+ return nil, fmt.Errorf("unable to identify peer from addresses: %v", filteredAddrs)
}
stream, err := s.newStreamForPeerID(ctx, info.ID, handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName)
@@ -751,7 +1047,32 @@ func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.
}
handshakeStream := newStream(stream, s.metrics)
- i, err := s.handshakeService.Handshake(ctx, handshakeStream, stream.Conn().RemoteMultiaddr(), stream.Conn().RemotePeer())
+
+ peerAddrs, err := s.peerMultiaddrs(ctx, peerID)
+ if err != nil {
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(peerID)
+ return nil, fmt.Errorf("build peer multiaddrs: %w", err)
+ }
+
+ observedAddrs := peerAddrs
+ if len(observedAddrs) == 0 {
+ observedAddrs, err = buildFullMAs([]ma.Multiaddr{stream.Conn().RemoteMultiaddr()}, peerID)
+ if err != nil {
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(peerID)
+ return nil, fmt.Errorf("build peer multiaddrs fallback: %w", err)
+ }
+ }
+
+ bee260Compat := s.bee260BackwardCompatibility(peerID)
+
+ i, err := s.handshakeService.Handshake(
+ s.ctx,
+ handshakeStream,
+ observedAddrs,
+ handshake.WithBee260Compatibility(bee260Compat),
+ )
if err != nil {
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(info.ID)
@@ -796,6 +1117,10 @@ func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.
return nil, fmt.Errorf("connect full close %w", err)
}
+ if !s.peers.Exists(overlay) {
+ return nil, p2p.ErrPeerNotFound
+ }
+
if i.FullNode {
err = s.addressbook.Put(overlay, *i.BzzAddress)
if err != nil {
@@ -824,14 +1149,13 @@ func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.
s.metrics.CreatedConnectionCount.Inc()
- if s.reacher != nil {
- s.reacher.Connected(overlay, i.BzzAddress.Underlay)
+ if len(peerAddrs) > 0 {
+ s.notifyReacherConnected(overlay, peerAddrs)
}
- peerUserAgent := appendSpace(s.peerUserAgent(ctx, info.ID))
-
- loggerV1.Debug("successfully connected to peer (outbound)", "addresses", i.BzzAddress.ShortString(), "light", i.LightString(), "user_agent", peerUserAgent)
- s.logger.Debug("successfully connected to peer (outbound)", "address", i.BzzAddress.Overlay, "light", i.LightString(), "user_agent", peerUserAgent)
+ peerUA := appendSpace(s.peerUserAgent(ctx, peerID))
+ loggerV1.Debug("successfully connected to peer (outbound)", "addresses", i.BzzAddress.ShortString(), "light", i.LightString(), "user_agent", peerUA)
+ s.logger.Debug("successfully connected to peer (outbound)", "address", overlay, "light", i.LightString(), "user_agent", peerUA)
return i.BzzAddress, nil
}
@@ -932,6 +1256,12 @@ func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers
return nil, p2p.ErrPeerNotFound
}
+ // Verify if we really have an active connection
+ if s.host.Network().Connectedness(peerID) != network.Connected {
+ _ = s.Disconnect(overlay, "registry-host mismatch in NewStream")
+ return nil, p2p.ErrPeerNotFound
+ }
+
streamlibp2p, err := s.newStreamForPeerID(ctx, peerID, protocolName, protocolVersion, streamName)
if err != nil {
return nil, fmt.Errorf("new stream for peerid: %w", err)
@@ -961,6 +1291,11 @@ func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers
func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID, protocolName, protocolVersion, streamName string) (network.Stream, error) {
swarmStreamName := p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName)
+
+ if s.host.Network().Connectedness(peerID) != network.Connected {
+ s.logger.Debug("newStreamForPeerID: host not connected to peer, this will trigger a dial", "peer_id", peerID, "protocol", swarmStreamName)
+ }
+
st, err := s.host.NewStream(ctx, peerID, protocol.ID(swarmStreamName))
if err != nil {
if st != nil {
@@ -981,6 +1316,9 @@ func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID,
}
func (s *Service) Close() error {
+ if s.autoTLSCertManager != nil {
+ s.autoTLSCertManager.Stop()
+ }
if err := s.libp2pPeerstore.Close(); err != nil {
return err
}
@@ -1005,6 +1343,9 @@ func (s *Service) Close() error {
return err
}
}
+ if s.zapLogger != nil {
+ _ = s.zapLogger.Sync()
+ }
return s.host.Close()
}
@@ -1061,7 +1402,7 @@ func (s *Service) peerUserAgent(ctx context.Context, peerID libp2ppeer.ID) strin
ctx, cancel := context.WithTimeout(ctx, peerUserAgentTimeout)
defer cancel()
var (
- v interface{}
+ v any
err error
)
// Peerstore may not contain all keys and values right after the connections is created.
@@ -1119,6 +1460,55 @@ func (s *Service) determineCurrentNetworkStatus(err error) error {
return err
}
+// peerMultiaddrs builds full multiaddresses for a peer using the peerstore.
+func (s *Service) peerMultiaddrs(ctx context.Context, peerID libp2ppeer.ID) ([]ma.Multiaddr, error) {
+ waitPeersCtx, cancel := context.WithTimeout(ctx, peerstoreWaitAddrsTimeout)
+ defer cancel()
+
+ mas := waitPeerAddrs(waitPeersCtx, s.host.Peerstore(), peerID)
+
+ return buildFullMAs(mas, peerID)
+}
+
+// IsBee260 implements p2p.Bee260CompatibilityStreamer interface.
+// It checks if a peer is running Bee version older than 2.7.0.
+func (s *Service) IsBee260(overlay swarm.Address) bool {
+ peerID, found := s.peers.peerID(overlay)
+ if !found {
+ return false
+ }
+ return s.bee260BackwardCompatibility(peerID)
+}
+
+var version270 = *semver.Must(semver.NewVersion("2.7.0"))
+
+func (s *Service) bee260BackwardCompatibility(peerID libp2ppeer.ID) bool {
+ if compat, found := s.peers.bee260(peerID); found {
+ return compat
+ }
+
+ userAgent := s.peerUserAgent(s.ctx, peerID)
+ p := strings.SplitN(userAgent, " ", 2)
+ if len(p) != 2 {
+ return false
+ }
+ version := strings.TrimPrefix(p[0], "bee/")
+ v, err := semver.NewVersion(version)
+ if err != nil {
+ return false
+ }
+
+ // Compare major.minor.patch only (ignore pre-release)
+ // This way 2.7.0-rc12 is treated as >= 2.7.0
+ vCore, err := semver.NewVersion(fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch))
+ if err != nil {
+ return false
+ }
+ result := vCore.LessThan(version270)
+ s.peers.setBee260(peerID, result)
+ return result
+}
+
// appendSpace adds a leading space character if the string is not empty.
// It is useful for constructing log messages with conditional substrings.
func appendSpace(s string) string {
@@ -1178,3 +1568,106 @@ func isNetworkOrHostUnreachableError(err error) bool {
}
return false
}
+
+type compositeAddressResolver struct {
+ tcpResolver handshake.AdvertisableAddressResolver
+ wssResolver handshake.AdvertisableAddressResolver
+}
+
+func newCompositeAddressResolver(tcpResolver, wssResolver handshake.AdvertisableAddressResolver) handshake.AdvertisableAddressResolver {
+ return &compositeAddressResolver{
+ tcpResolver: tcpResolver,
+ wssResolver: wssResolver,
+ }
+}
+
+func (c *compositeAddressResolver) Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error) {
+ protocols := observedAddress.Protocols()
+
+ containsProtocol := func(protocols []ma.Protocol, code int) bool {
+ return slices.ContainsFunc(protocols, func(p ma.Protocol) bool { return p.Code == code })
+ }
+
+ // ma.P_WSS protocol is deprecated, multiaddrs should contain WS and TLS protocols for WSS
+ isWSS := containsProtocol(protocols, ma.P_WS) && containsProtocol(protocols, ma.P_TLS)
+
+ if isWSS {
+ if c.wssResolver != nil {
+ return c.wssResolver.Resolve(observedAddress)
+ }
+ } else {
+ if c.tcpResolver != nil {
+ return c.tcpResolver.Resolve(observedAddress)
+ }
+ }
+ return observedAddress, nil
+}
+
+type hostAddresser struct {
+ host host.Host
+}
+
+func newHostAddresser(host host.Host) *hostAddresser {
+ return &hostAddresser{
+ host: host,
+ }
+}
+
+func (h *hostAddresser) AdvertizableAddrs() ([]ma.Multiaddr, error) {
+ addrs := make([]ma.Multiaddr, 0)
+ for _, a := range h.host.Addrs() {
+ if manet.IsIPLoopback(a) {
+ continue
+ }
+ addrs = append(addrs, a)
+ }
+ return buildFullMAs(addrs, h.host.ID())
+}
+
+func buildFullMAs(addrs []ma.Multiaddr, peerID libp2ppeer.ID) ([]ma.Multiaddr, error) {
+ fullMAs := make([]ma.Multiaddr, 0)
+ for _, addr := range addrs {
+ res, err := buildFullMA(addr, peerID)
+ if err != nil {
+ return nil, err
+ }
+ if slices.ContainsFunc(fullMAs, func(a ma.Multiaddr) bool {
+ return a.Equal(res)
+ }) {
+ continue
+ }
+ fullMAs = append(fullMAs, res)
+ }
+ return fullMAs, nil
+}
+
+func buildFullMA(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) {
+ if _, err := addr.ValueForProtocol(ma.P_P2P); err == nil {
+ return addr, nil
+ }
+ return ma.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", addr.String(), peerID.String()))
+}
+
+// waitPeerAddrs is used to reliably get remote addresses from libp2p peerstore
+// as sometimes addresses are not available soon enough from its Addrs() method.
+func waitPeerAddrs(ctx context.Context, s peerstore.Peerstore, peerID libp2ppeer.ID) []ma.Multiaddr {
+ ctx, cancel := context.WithCancel(ctx) // cancel the addrStream when this function exits
+ defer cancel()
+
+ // ensure that the AddrStream will receive addresses by creating it before Addrs() is called
+ // this may happen just after the connection is established and peerstore is not updated
+ addrStream := s.AddrStream(ctx, peerID)
+
+ addrs := s.Addrs(peerID)
+ if len(addrs) > 0 {
+ return addrs
+ }
+
+ select {
+ case addr := <-addrStream:
+ // return the first address as it arrives
+ return []ma.Multiaddr{addr}
+ case <-ctx.Done():
+ return s.Addrs(peerID)
+ }
+}
diff --git a/pkg/p2p/libp2p/libp2p_test.go b/pkg/p2p/libp2p/libp2p_test.go
index c467247547e..ede9cbbcb79 100644
--- a/pkg/p2p/libp2p/libp2p_test.go
+++ b/pkg/p2p/libp2p/libp2p_test.go
@@ -27,13 +27,14 @@ import (
)
type libp2pServiceOpts struct {
- Logger log.Logger
- Addressbook addressbook.Interface
- PrivateKey *ecdsa.PrivateKey
- MockPeerKey *ecdsa.PrivateKey
- libp2pOpts libp2p.Options
- lightNodes *lightnode.Container
- notifier p2p.PickyNotifier
+ Logger log.Logger
+ Addressbook addressbook.Interface
+ PrivateKey *ecdsa.PrivateKey
+ MockPeerKey *ecdsa.PrivateKey
+ libp2pOpts libp2p.Options
+ lightNodes *lightnode.Container
+ notifier p2p.PickyNotifier
+ autoTLSCertManager libp2p.AutoTLSCertManager
}
// newService constructs a new libp2p service.
@@ -81,6 +82,10 @@ func newService(t *testing.T, networkID uint64, o libp2pServiceOpts) (s *libp2p.
opts := o.libp2pOpts
opts.Nonce = nonce
+ if o.autoTLSCertManager != nil {
+ libp2p.SetAutoTLSCertManager(&opts, o.autoTLSCertManager)
+ }
+
s, err = libp2p.New(ctx, crypto.NewDefaultSigner(swarmKey), networkID, overlay, addr, o.Addressbook, statestore, o.lightNodes, o.Logger, nil, opts)
if err != nil {
t.Fatal(err)
@@ -128,7 +133,7 @@ func expectPeersEventually(t *testing.T, s *libp2p.Service, addrs ...swarm.Addre
t.Helper()
var peers []p2p.Peer
- err := spinlock.Wait(time.Second, func() bool {
+ err := spinlock.Wait(5*time.Second, func() bool {
peers = s.Peers()
return len(peers) == len(addrs)
@@ -152,12 +157,12 @@ func expectPeersEventually(t *testing.T, s *libp2p.Service, addrs ...swarm.Addre
}
}
-func serviceUnderlayAddress(t *testing.T, s *libp2p.Service) multiaddr.Multiaddr {
+func serviceUnderlayAddress(t *testing.T, s *libp2p.Service) []multiaddr.Multiaddr {
t.Helper()
addrs, err := s.Addresses()
if err != nil {
t.Fatal(err)
}
- return addrs[0]
+ return addrs
}
diff --git a/pkg/p2p/libp2p/libp2ptest/buffer.go b/pkg/p2p/libp2p/libp2ptest/buffer.go
new file mode 100644
index 00000000000..ed98e09b285
--- /dev/null
+++ b/pkg/p2p/libp2p/libp2ptest/buffer.go
@@ -0,0 +1,28 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package libp2ptest
+
+import (
+ "bytes"
+ "sync"
+)
+
+// SafeBuffer is a thread-safe bytes.Buffer.
+type SafeBuffer struct {
+ b bytes.Buffer
+ m sync.Mutex
+}
+
+func (s *SafeBuffer) Write(p []byte) (n int, err error) {
+ s.m.Lock()
+ defer s.m.Unlock()
+ return s.b.Write(p)
+}
+
+func (s *SafeBuffer) String() string {
+ s.m.Lock()
+ defer s.m.Unlock()
+ return s.b.String()
+}
diff --git a/pkg/p2p/libp2p/libp2ptest/service.go b/pkg/p2p/libp2p/libp2ptest/service.go
new file mode 100644
index 00000000000..8a62531f342
--- /dev/null
+++ b/pkg/p2p/libp2p/libp2ptest/service.go
@@ -0,0 +1,69 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package libp2ptest
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethersphere/bee/v2/pkg/addressbook"
+ "github.com/ethersphere/bee/v2/pkg/crypto"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p"
+ "github.com/ethersphere/bee/v2/pkg/statestore/mock"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology/lightnode"
+ "github.com/ethersphere/bee/v2/pkg/util/testutil"
+)
+
+// NewLibp2pService creates a new libp2p service for testing purposes.
+func NewLibp2pService(t *testing.T, networkID uint64, logger log.Logger) (*libp2p.Service, swarm.Address) {
+ t.Helper()
+
+ swarmKey, err := crypto.GenerateSecp256k1Key()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ nonce := common.HexToHash("0x1").Bytes()
+
+ overlay, err := crypto.NewOverlayAddress(swarmKey.PublicKey, networkID, nonce)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ addr := ":0"
+
+ statestore := mock.NewStateStore()
+ ab := addressbook.New(statestore)
+
+ libp2pKey, err := crypto.GenerateSecp256r1Key()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ lightNodes := lightnode.NewContainer(overlay)
+
+ opts := libp2p.Options{
+ PrivateKey: libp2pKey,
+ Nonce: nonce,
+ FullNode: true,
+ NATAddr: "127.0.0.1:0", // Disable default NAT manager
+ }
+
+ s, err := libp2p.New(ctx, crypto.NewDefaultSigner(swarmKey), networkID, overlay, addr, ab, statestore, lightNodes, logger, nil, opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ testutil.CleanupCloser(t, s)
+
+ _ = s.Ready()
+
+ return s, overlay
+}
diff --git a/pkg/p2p/libp2p/metrics.go b/pkg/p2p/libp2p/metrics.go
index 378ec26f6b0..a98035913e4 100644
--- a/pkg/p2p/libp2p/metrics.go
+++ b/pkg/p2p/libp2p/metrics.go
@@ -121,7 +121,11 @@ func newMetrics() metrics {
}
func (s *Service) Metrics() []prometheus.Collector {
- return append(m.PrometheusCollectorsFromFields(s.metrics), s.handshakeService.Metrics()...)
+ collectors := append(m.PrometheusCollectorsFromFields(s.metrics), s.handshakeService.Metrics()...)
+ if mc, ok := s.reacher.(interface{ Metrics() []prometheus.Collector }); ok {
+ collectors = append(collectors, mc.Metrics()...)
+ }
+ return collectors
}
// StatusMetrics exposes metrics that are exposed on the status protocol.
diff --git a/pkg/p2p/libp2p/mock/mock_certmagic.go b/pkg/p2p/libp2p/mock/mock_certmagic.go
new file mode 100644
index 00000000000..8337f26e8ab
--- /dev/null
+++ b/pkg/p2p/libp2p/mock/mock_certmagic.go
@@ -0,0 +1,158 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package mock
+
+import (
+ "context"
+ "crypto/tls"
+ "sync"
+
+ _ "embed"
+
+ "github.com/libp2p/go-libp2p/config"
+ "github.com/libp2p/go-libp2p/core/host"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+//go:embed testdata/cert.pem
+var certPEM []byte
+
+//go:embed testdata/key.pem
+var keyPEM []byte
+
+// MockFileStorage is a minimal implementation of certmagic.FileStorage for testing.
+type MockFileStorage struct {
+ path string
+}
+
+func NewMockFileStorage(path string) *MockFileStorage {
+ return &MockFileStorage{path: path}
+}
+
+func (m *MockFileStorage) Store(_ context.Context, _ string, _ []byte) error {
+ return nil
+}
+
+func (m *MockFileStorage) Load(_ context.Context, _ string) ([]byte, error) {
+ return nil, nil
+}
+
+func (m *MockFileStorage) Delete(_ context.Context, _ string) error {
+ return nil
+}
+
+func (m *MockFileStorage) Exists(_ context.Context, _ string) (bool, error) {
+ return false, nil
+}
+
+func (m *MockFileStorage) List(_ context.Context, _ string, _ bool) ([]string, error) {
+ return nil, nil
+}
+
+func (m *MockFileStorage) Lock(_ context.Context, _ string) error {
+ return nil
+}
+
+func (m *MockFileStorage) Unlock(_ context.Context, _ string) error {
+ return nil
+}
+
+// MockCache is a minimal implementation of certmagic.Cache for testing.
+type MockCache struct {
+ stopped bool
+ mu sync.Mutex
+}
+
+func NewMockCache() *MockCache {
+ return &MockCache{}
+}
+
+func (m *MockCache) Stop() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.stopped = true
+}
+
+// MockConfig is a minimal implementation of certmagic.Config for testing.
+type MockConfig struct {
+ cache *MockCache
+}
+
+func NewMockConfig() *MockConfig {
+ return &MockConfig{cache: NewMockCache()}
+}
+
+func (m *MockConfig) TLSConfig() *tls.Config {
+ return &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ }
+}
+
+// MockP2PForgeCertMgr is a mock implementation of p2pforge.P2PForgeCertMgr.
+type MockP2PForgeCertMgr struct {
+ cache *MockCache
+ onCertLoaded func()
+ started bool
+ mu sync.Mutex
+ ProvideHost func(host.Host) error
+}
+
+func NewMockP2PForgeCertMgr(onCertLoaded func()) *MockP2PForgeCertMgr {
+ return &MockP2PForgeCertMgr{
+ cache: NewMockCache(),
+ onCertLoaded: onCertLoaded,
+ ProvideHost: func(_ host.Host) error { return nil },
+ }
+}
+
+func (m *MockP2PForgeCertMgr) Start() error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if m.started {
+ return nil
+ }
+ m.started = true
+ if m.onCertLoaded != nil {
+ go func() {
+ m.onCertLoaded()
+ }()
+ }
+ return nil
+}
+func (m *MockP2PForgeCertMgr) Stop() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if !m.started {
+ return
+ }
+ m.started = false
+ m.cache.Stop()
+}
+
+func (m *MockP2PForgeCertMgr) TLSConfig() *tls.Config {
+ // Use tls.X509KeyPair to create a certificate from the hardcoded strings
+ cert, err := tls.X509KeyPair(certPEM, keyPEM)
+ if err != nil {
+ // This should not fail if the strings are pasted correctly
+ return nil
+ }
+
+ return &tls.Config{Certificates: []tls.Certificate{cert}}
+}
+
+func (m *MockP2PForgeCertMgr) SetOnCertLoaded(cb func()) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.onCertLoaded = cb
+}
+
+func (m *MockP2PForgeCertMgr) AddressFactory() config.AddrsFactory {
+ return func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ return addrs
+ }
+}
+
+func (m *MockP2PForgeCertMgr) GetCache() *MockCache {
+ return m.cache
+}
diff --git a/pkg/p2p/libp2p/mock/mock_certmagic_test.go b/pkg/p2p/libp2p/mock/mock_certmagic_test.go
new file mode 100644
index 00000000000..93dd0d159b6
--- /dev/null
+++ b/pkg/p2p/libp2p/mock/mock_certmagic_test.go
@@ -0,0 +1,112 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mock_test
+
+import (
+ "crypto/tls"
+ "testing"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/mock"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func TestMockP2PForgeCertMgr_Start(t *testing.T) {
+ certLoaded := make(chan struct{})
+ mgr := mock.NewMockP2PForgeCertMgr(func() {
+ close(certLoaded)
+ })
+
+ if err := mgr.Start(); err != nil {
+ t.Fatalf("Start() error = %v, wantErr %v", err, nil)
+ }
+
+ select {
+ case <-certLoaded:
+ case <-time.After(time.Second):
+ t.Fatal("onCertLoaded callback was not triggered")
+ }
+
+ // Test idempotency
+ if err := mgr.Start(); err != nil {
+ t.Fatalf("Start() second call error = %v, wantErr %v", err, nil)
+ }
+}
+
+func TestMockP2PForgeCertMgr_Stop(t *testing.T) {
+ mgr := mock.NewMockP2PForgeCertMgr(nil)
+
+ if err := mgr.Start(); err != nil {
+ t.Fatalf("Start() error = %v, wantErr %v", err, nil)
+ }
+
+ mgr.Stop()
+
+ // Test idempotency
+ mgr.Stop()
+}
+
+func TestMockP2PForgeCertMgr_TLSConfig(t *testing.T) {
+ mgr := mock.NewMockP2PForgeCertMgr(nil)
+ cfg := mgr.TLSConfig()
+
+ if cfg == nil {
+ t.Fatal("TLSConfig() returned nil")
+ }
+
+ if len(cfg.Certificates) == 0 {
+ t.Error("TLSConfig() returned no certificates")
+ }
+}
+
+func TestMockP2PForgeCertMgr_AddressFactory(t *testing.T) {
+ mgr := mock.NewMockP2PForgeCertMgr(nil)
+ factory := mgr.AddressFactory()
+
+ addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
+ addrs := []ma.Multiaddr{addr}
+
+ got := factory(addrs)
+ if len(got) != 1 || !got[0].Equal(addr) {
+ t.Errorf("AddressFactory() = %v, want %v", got, addrs)
+ }
+}
+
+func TestMockP2PForgeCertMgr_SetOnCertLoaded(t *testing.T) {
+ mgr := mock.NewMockP2PForgeCertMgr(nil)
+ certLoaded := make(chan struct{})
+
+ mgr.SetOnCertLoaded(func() {
+ close(certLoaded)
+ })
+
+ if err := mgr.Start(); err != nil {
+ t.Fatalf("Start() error = %v, wantErr %v", err, nil)
+ }
+
+ select {
+ case <-certLoaded:
+ case <-time.After(time.Second):
+ t.Fatal("onCertLoaded callback was not triggered after SetOnCertLoaded")
+ }
+}
+
+func TestMockP2PForgeCertMgr_GetCache(t *testing.T) {
+ mgr := mock.NewMockP2PForgeCertMgr(nil)
+ if mgr.GetCache() == nil {
+ t.Error("GetCache() returned nil")
+ }
+}
+
+func TestMockConfig(t *testing.T) {
+ cfg := mock.NewMockConfig()
+ tlsCfg := cfg.TLSConfig()
+ if tlsCfg == nil {
+ t.Fatal("TLSConfig() returned nil")
+ }
+ if tlsCfg.MinVersion != tls.VersionTLS12 {
+ t.Errorf("TLSConfig().MinVersion = %v, want %v", tlsCfg.MinVersion, tls.VersionTLS12)
+ }
+}
diff --git a/pkg/p2p/libp2p/mock/testdata/cert.pem b/pkg/p2p/libp2p/mock/testdata/cert.pem
new file mode 100644
index 00000000000..942876e1fac
--- /dev/null
+++ b/pkg/p2p/libp2p/mock/testdata/cert.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDCzCCAfOgAwIBAgIUYmVQGLnuPXQb8yC4X69ll6FQjWQwDQYJKoZIhvcNAQEL
+BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTI1MDgwMTA3MzA0OFoYDzIxMjUw
+NzA4MDczMDQ4WjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQC/NbDVAd4OdVMNiK+BjAFyTCIiJssgxhi6rWEgzykV
+6lv0Q4/loUCwp1ylh6LJa6yjioGmxXl2CqIALcxRXg/DopJFgfIPPhghSHVE6AvV
+PGf+Qw85vXfdjX6qymPsRRz5mMj9N9BidL1HgJjxpGw+7aDwXxMCvxwFN46kyfb9
+fawIIlAJv2gxBoKxf+8AXwimPUrXxG1HziMPNTjQGW+zVRzeK4pTNdt+CXcf4vLM
+LKv7eRXYiAL7m/6/UE/SQOXDGxvjkOTWgVX2gcV9tMZOaFJBp/WycsexiajRRxLQ
+tl1weQhyBmLA7N494sYpwmzF1feybkkrPeU7Vo7T2gLrAgMBAAGjUzBRMB0GA1Ud
+DgQWBBRs/abpJGBaT14jL05Fj9PVbc7T4TAfBgNVHSMEGDAWgBRs/abpJGBaT14j
+L05Fj9PVbc7T4TAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQA1
+mhrBSvtlW4mhxQAAEfX/JotieeGuG9nUE75LJ+sW5mRDO+5PZeHAajBdWFGftCm2
+vNONQMVkAt8PSPuZ+6t284HSpWLTgYGTu19m9JFDfw3Dj7HIX95c0xq1WdNSDvpR
+kRfoe7yc4B6mCKIU4EjAv0lUs21I2oLIbqQdhJneivutyCYcV/hxt7OJZs92g9HX
+S8MGeWRCFhoPoOQ9tvszqzf7AB3uqiRuFxk2DkuZ7yN63/+DGj2kTBkf3Qe+4DyS
+7DK5XkPdl/ajTXNoLdgYUw/5hdsbcTjnxRr37eKqoa/MtQxzQjoA5HWYoaDX+Vzp
+OwmvrIFPPZ+bU+6+Xs5i
+-----END CERTIFICATE-----
diff --git a/pkg/p2p/libp2p/mock/testdata/key.pem b/pkg/p2p/libp2p/mock/testdata/key.pem
new file mode 100644
index 00000000000..9668c2e083f
--- /dev/null
+++ b/pkg/p2p/libp2p/mock/testdata/key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC/NbDVAd4OdVMN
+iK+BjAFyTCIiJssgxhi6rWEgzykV6lv0Q4/loUCwp1ylh6LJa6yjioGmxXl2CqIA
+LcxRXg/DopJFgfIPPhghSHVE6AvVPGf+Qw85vXfdjX6qymPsRRz5mMj9N9BidL1H
+gJjxpGw+7aDwXxMCvxwFN46kyfb9fawIIlAJv2gxBoKxf+8AXwimPUrXxG1HziMP
+NTjQGW+zVRzeK4pTNdt+CXcf4vLMLKv7eRXYiAL7m/6/UE/SQOXDGxvjkOTWgVX2
+gcV9tMZOaFJBp/WycsexiajRRxLQtl1weQhyBmLA7N494sYpwmzF1feybkkrPeU7
+Vo7T2gLrAgMBAAECggEAAlN0carm2Q7pUAgAVX3d4SvUyhEZNdCNw1MNeqgP0IUi
+a22y1uyjjC9i4PdputYh+3bqgS1JdpmB6botbiQY0XZkyGQCt9fAB9yvjZRmylCz
+b0mNe23l+lo2lvPimLGxmrTjZUwbfCR1m0Jupqoi4TqOud7nRohxwIQ6Zjmm3Y53
+Ybiv8h46mhy2NZFI2zwaO9q0MpYozCsCR5Xi6DasJZedlAB+185mTJo79e2AllPI
+Y3tsQLgrCc2i9TAoEjvJdCxNDuElusabKgROj3DPr2LEkApeX2EdknNKqMIb+NIk
+7htDSYjoJuG1ABRB0vBx7s0OGtk+IC/xYwdYB8wgdQKBgQDszNAHHUGbBr2yh8MP
+JXqcoxXiDMOjyrrJcTzkUGXP2tCG05u+do8e4Or1K6WXKJGuITeewd6cW/HE2Mlt
+sc58b+6+H3hm1dkuAOj+uF6mV8Al2TxVoRrKlAojAzu9QhkH+9GYR+nGqz7wKSnS
+6EQETiCfd5LYrK0OH12wOla+JQKBgQDOtpLWroD5Vjf4KJ2qX4un2V2IKyWMcofH
+t4qWbD+6F4Kt/ZCxFKviLBwRffGaatp7E1W0kU76oflsrnpRzSzMearlLIIid8nA
+ucXCuyJdQPSivsHJJM6MMQ4BzT2stR8JDtJMNip/JUb0pZFS/aG1BCp+nLSoU23m
+q7vyUvJHzwKBgQDcaCaY+Jo/+Z5HtiXQy0m80e9kYA0ZP3FsXoIW4N5jAYBmfj/Q
+n/nG/AK2ANI4SAKQ2Uoz8q+JSetXFZEnEQDowiatwA0JarKjJyW3MVSn77VhhTmr
+WjDdrb1hqXjJR+SUkcccvpLR4ELMtwO+04G7oBytUVbVZqQNKRTDGwnyIQKBgD+t
+1KxX05l76wACmxdqGZ6agoq5J/cNLTDkJMhUDomoRnSNAW7bvFuPVRI6Zxw3wJhb
+i3J1tQvWq/zD/yCGAT/4VyIERQ6TMk6xq+9iMKLjqLkd5JqvQQXE8tixPkefADGN
+JFGf+hVzCVnCS3NyeMdHwkOAyNJ16Qw/aUWsMcDXAoGBAJpIdS7fu7Odxp76ssjy
+AUsUFvGLH3rndYUWANy9xmz5FAxt4pZAbtbIYqPXy+4hpXWhqgOmTti8GDqL1W8g
+CdYKvjVOHj5GNMQ6QJ3b1MDmUOeSgaN7NIY7SuKlKT54nYkplej9cd/4h1DpjHar
+3eEDaJABlw0eDY7F2qXJZzZ3
+-----END PRIVATE KEY-----
diff --git a/pkg/p2p/libp2p/p2pforge.go b/pkg/p2p/libp2p/p2pforge.go
new file mode 100644
index 00000000000..fd1d13b22eb
--- /dev/null
+++ b/pkg/p2p/libp2p/p2pforge.go
@@ -0,0 +1,138 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package libp2p
+
+import (
+ "crypto/tls"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ p2pforge "github.com/ipshipyard/p2p-forge/client"
+ "github.com/libp2p/go-libp2p/config"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// P2PForgeOptions contains the configuration for creating a P2P Forge certificate manager.
+type P2PForgeOptions struct {
+ Domain string
+ RegistrationEndpoint string
+ CAEndpoint string
+ StorageDir string
+}
+
+// P2PForgeCertManager wraps the p2p-forge certificate manager with its associated zap logger.
+type P2PForgeCertManager struct {
+ certMgr *p2pforge.P2PForgeCertMgr
+ zapLogger *zap.Logger
+}
+
+// newP2PForgeCertManager creates a new P2P Forge certificate manager.
+// It handles the creation of the storage directory and configures logging
+// to match bee's verbosity level.
+func newP2PForgeCertManager(beeLogger log.Logger, opts P2PForgeOptions) (*P2PForgeCertManager, error) {
+ zapLogger, err := newZapLogger(beeLogger)
+ if err != nil {
+ return nil, fmt.Errorf("create zap logger: %w", err)
+ }
+
+ // Use storage dir with domain subdir for easier management of different registries.
+ storagePath := filepath.Join(opts.StorageDir, opts.Domain)
+ if err := os.MkdirAll(storagePath, 0o700); err != nil {
+ return nil, fmt.Errorf("create certificate storage directory %s: %w", storagePath, err)
+ }
+
+ certMgr, err := p2pforge.NewP2PForgeCertMgr(
+ p2pforge.WithForgeDomain(opts.Domain),
+ p2pforge.WithForgeRegistrationEndpoint(opts.RegistrationEndpoint),
+ p2pforge.WithCAEndpoint(opts.CAEndpoint),
+ p2pforge.WithCertificateStorage(&certmagic.FileStorage{Path: storagePath}),
+ p2pforge.WithLogger(zapLogger.Sugar()),
+ p2pforge.WithUserAgent(userAgent()),
+ p2pforge.WithAllowPrivateForgeAddrs(),
+ p2pforge.WithRegistrationDelay(0),
+ p2pforge.WithOnCertLoaded(func() {
+ beeLogger.Info("auto tls certificate is loaded")
+ }),
+ p2pforge.WithOnCertRenewed(func() {
+ beeLogger.Info("auto tls certificate is renewed")
+ }),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("initialize P2P Forge: %w", err)
+ }
+
+ return &P2PForgeCertManager{
+ certMgr: certMgr,
+ zapLogger: zapLogger,
+ }, nil
+}
+
+// CertMgr returns the underlying p2pforge.P2PForgeCertMgr.
+func (m *P2PForgeCertManager) CertMgr() *p2pforge.P2PForgeCertMgr {
+ return m.certMgr
+}
+
+// ZapLogger returns the zap logger used by the certificate manager.
+func (m *P2PForgeCertManager) ZapLogger() *zap.Logger {
+ return m.zapLogger
+}
+
+// autoTLSCertManager defines the interface for managing TLS certificates.
+type autoTLSCertManager interface {
+ Start() error
+ Stop()
+ TLSConfig() *tls.Config
+ AddressFactory() config.AddrsFactory
+}
+
+// newZapLogger creates a zap logger configured to match bee's verbosity level.
+// This is used by third-party libraries (like p2p-forge) that require a zap logger.
+func newZapLogger(beeLogger log.Logger) (*zap.Logger, error) {
+ cfg := zap.Config{
+ Level: zap.NewAtomicLevelAt(beeVerbosityToZapLevel(beeLogger.Verbosity())),
+ Development: false,
+ Encoding: "json",
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ Sampling: &zap.SamplingConfig{
+ Initial: 100,
+ Thereafter: 100,
+ },
+ EncoderConfig: zapcore.EncoderConfig{
+ TimeKey: "time",
+ LevelKey: "level",
+ NameKey: "logger",
+ CallerKey: "caller",
+ MessageKey: "msg",
+ StacktraceKey: "stacktrace",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.EpochTimeEncoder,
+ EncodeDuration: zapcore.SecondsDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ },
+ }
+ return cfg.Build()
+}
+
+// beeVerbosityToZapLevel converts bee's log verbosity level to zap's log level.
+func beeVerbosityToZapLevel(v log.Level) zapcore.Level {
+ switch {
+ case v <= log.VerbosityNone:
+ return zap.FatalLevel // effectively silences the logger
+ case v == log.VerbosityError:
+ return zap.ErrorLevel
+ case v == log.VerbosityWarning:
+ return zap.WarnLevel
+ case v == log.VerbosityInfo:
+ return zap.InfoLevel
+ default:
+ return zap.DebugLevel // VerbosityDebug and VerbosityAll
+ }
+}
diff --git a/pkg/p2p/libp2p/peer.go b/pkg/p2p/libp2p/peer.go
index 5aa0f75713a..232fbf9b103 100644
--- a/pkg/p2p/libp2p/peer.go
+++ b/pkg/p2p/libp2p/peer.go
@@ -18,12 +18,13 @@ import (
)
type peerRegistry struct {
- underlays map[string]libp2ppeer.ID // map overlay address to underlay peer id
- overlays map[libp2ppeer.ID]swarm.Address // map underlay peer id to overlay address
- full map[libp2ppeer.ID]bool // map to track whether a node is full or light node (true=full)
- connections map[libp2ppeer.ID]map[network.Conn]struct{} // list of connections for safe removal on Disconnect notification
- streams map[libp2ppeer.ID]map[network.Stream]context.CancelFunc
- mu sync.RWMutex
+ overlayToPeerID map[string]libp2ppeer.ID // map overlay address to underlay peer id
+ overlays map[libp2ppeer.ID]swarm.Address // map underlay peer id to overlay address
+ full map[libp2ppeer.ID]bool // map to track whether a node is full or light node (true=full)
+ bee260Compatibility map[libp2ppeer.ID]bool // map to track bee260 backward compatibility
+ connections map[libp2ppeer.ID]map[network.Conn]struct{} // list of connections for safe removal on Disconnect notification
+ streams map[libp2ppeer.ID]map[network.Stream]context.CancelFunc
+ mu sync.RWMutex
//nolint:misspell
disconnecter disconnecter // peerRegistry notifies libp2p on peer disconnection
@@ -36,11 +37,12 @@ type disconnecter interface {
func newPeerRegistry() *peerRegistry {
return &peerRegistry{
- underlays: make(map[string]libp2ppeer.ID),
- overlays: make(map[libp2ppeer.ID]swarm.Address),
- full: make(map[libp2ppeer.ID]bool),
- connections: make(map[libp2ppeer.ID]map[network.Conn]struct{}),
- streams: make(map[libp2ppeer.ID]map[network.Stream]context.CancelFunc),
+ overlayToPeerID: make(map[string]libp2ppeer.ID),
+ overlays: make(map[libp2ppeer.ID]swarm.Address),
+ full: make(map[libp2ppeer.ID]bool),
+ bee260Compatibility: make(map[libp2ppeer.ID]bool),
+ connections: make(map[libp2ppeer.ID]map[network.Conn]struct{}),
+ streams: make(map[libp2ppeer.ID]map[network.Stream]context.CancelFunc),
Notifiee: new(network.NoopNotifiee),
}
@@ -51,7 +53,7 @@ func (r *peerRegistry) Exists(overlay swarm.Address) (found bool) {
return found
}
-// Disconnect removes the peer from registry in disconnect.
+// Disconnected removes the peer from registry in disconnect.
// peerRegistry has to be set by network.Network.Notify().
func (r *peerRegistry) Disconnected(_ network.Network, c network.Conn) {
peerID := c.RemotePeer()
@@ -75,12 +77,13 @@ func (r *peerRegistry) Disconnected(_ network.Network, c network.Conn) {
delete(r.connections, peerID)
overlay := r.overlays[peerID]
delete(r.overlays, peerID)
- delete(r.underlays, overlay.ByteString())
+ delete(r.overlayToPeerID, overlay.ByteString())
for _, cancel := range r.streams[peerID] {
cancel()
}
delete(r.streams, peerID)
delete(r.full, peerID)
+ delete(r.bee260Compatibility, peerID)
r.mu.Unlock()
r.disconnecter.disconnected(overlay)
@@ -143,12 +146,12 @@ func (r *peerRegistry) addIfNotExists(c network.Conn, overlay swarm.Address, ful
// this is solving a case of multiple underlying libp2p connections for the same peer
r.connections[peerID][c] = struct{}{}
- if _, exists := r.underlays[overlay.ByteString()]; exists {
+ if _, exists := r.overlayToPeerID[overlay.ByteString()]; exists {
return true
}
r.streams[peerID] = make(map[network.Stream]context.CancelFunc)
- r.underlays[overlay.ByteString()] = peerID
+ r.overlayToPeerID[overlay.ByteString()] = peerID
r.overlays[peerID] = overlay
r.full[peerID] = full
return false
@@ -157,7 +160,7 @@ func (r *peerRegistry) addIfNotExists(c network.Conn, overlay swarm.Address, ful
func (r *peerRegistry) peerID(overlay swarm.Address) (peerID libp2ppeer.ID, found bool) {
r.mu.RLock()
- peerID, found = r.underlays[overlay.ByteString()]
+ peerID, found = r.overlayToPeerID[overlay.ByteString()]
r.mu.RUnlock()
return peerID, found
}
@@ -176,6 +179,19 @@ func (r *peerRegistry) fullnode(peerID libp2ppeer.ID) (bool, bool) {
return full, found
}
+func (r *peerRegistry) bee260(peerID libp2ppeer.ID) (compat, found bool) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ compat, found = r.bee260Compatibility[peerID]
+ return compat, found
+}
+
+func (r *peerRegistry) setBee260(peerID libp2ppeer.ID, compat bool) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.bee260Compatibility[peerID] = compat
+}
+
func (r *peerRegistry) isConnected(peerID libp2ppeer.ID, remoteAddr ma.Multiaddr) (swarm.Address, bool) {
if remoteAddr == nil {
return swarm.ZeroAddress, false
@@ -207,9 +223,9 @@ func (r *peerRegistry) isConnected(peerID libp2ppeer.ID, remoteAddr ma.Multiaddr
func (r *peerRegistry) remove(overlay swarm.Address) (found, full bool, peerID libp2ppeer.ID) {
r.mu.Lock()
- peerID, found = r.underlays[overlay.ByteString()]
+ peerID, found = r.overlayToPeerID[overlay.ByteString()]
delete(r.overlays, peerID)
- delete(r.underlays, overlay.ByteString())
+ delete(r.overlayToPeerID, overlay.ByteString())
delete(r.connections, peerID)
for _, cancel := range r.streams[peerID] {
cancel()
@@ -217,6 +233,7 @@ func (r *peerRegistry) remove(overlay swarm.Address) (found, full bool, peerID l
delete(r.streams, peerID)
full = r.full[peerID]
delete(r.full, peerID)
+ delete(r.bee260Compatibility, peerID)
r.mu.Unlock()
return found, full, peerID
diff --git a/pkg/p2p/libp2p/protocols_test.go b/pkg/p2p/libp2p/protocols_test.go
index 4563083726a..0bec5ba49af 100644
--- a/pkg/p2p/libp2p/protocols_test.go
+++ b/pkg/p2p/libp2p/protocols_test.go
@@ -18,16 +18,14 @@ import (
libp2pm "github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host"
protocol "github.com/libp2p/go-libp2p/core/protocol"
- bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
- swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ ma "github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multistream"
)
func TestNewStream(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -61,8 +59,7 @@ func TestNewStream(t *testing.T) {
func TestNewStream_OnlyFull(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -101,8 +98,7 @@ func TestNewStream_OnlyFull(t *testing.T) {
func TestNewStream_Mixed(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -140,8 +136,7 @@ func TestNewStream_Mixed(t *testing.T) {
func TestNewStreamMulti(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -190,8 +185,7 @@ func TestNewStreamMulti(t *testing.T) {
func TestNewStream_errNotSupported(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -229,8 +223,7 @@ func TestNewStream_errNotSupported(t *testing.T) {
func TestNewStream_semanticVersioning(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -292,8 +285,7 @@ func TestNewStream_semanticVersioning(t *testing.T) {
func TestDisconnectError(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -324,8 +316,7 @@ func TestDisconnectError(t *testing.T) {
func TestConnectDisconnectEvents(t *testing.T) {
t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
FullNode: true,
@@ -406,30 +397,34 @@ func TestPing(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
+ listenAddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hostFactory := libp2p.WithHostFactory(
+ func(opts ...libp2pm.Option) (host.Host, error) {
+ opts = append(opts, libp2pm.ListenAddrs(listenAddr))
+
+ h, err := libp2pm.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ return h, nil
+ },
+ )
+
s1, _ := newService(t, 1, libp2pServiceOpts{
- libp2pOpts: libp2p.WithHostFactory(
- func(...libp2pm.Option) (host.Host, error) {
- return bhost.NewHost(swarmt.GenSwarm(t), &bhost.HostOpts{EnablePing: true})
- },
- ),
+ libp2pOpts: hostFactory,
})
s2, _ := newService(t, 1, libp2pServiceOpts{
- libp2pOpts: libp2p.WithHostFactory(
- func(...libp2pm.Option) (host.Host, error) {
- host, err := bhost.NewHost(swarmt.GenSwarm(t), &bhost.HostOpts{EnablePing: true})
- if err != nil {
- t.Fatalf("start host: %v", err)
- }
- host.Start()
- return host, nil
- },
- ),
+ libp2pOpts: hostFactory,
})
addr := serviceUnderlayAddress(t, s1)
- if _, err := s2.Ping(ctx, addr); err != nil {
+ if _, err := s2.Ping(ctx, addr[0]); err != nil {
t.Fatal(err)
}
}
diff --git a/pkg/p2p/libp2p/static_resolver.go b/pkg/p2p/libp2p/static_resolver.go
index c5416862e1b..5927b2b022a 100644
--- a/pkg/p2p/libp2p/static_resolver.go
+++ b/pkg/p2p/libp2p/static_resolver.go
@@ -5,12 +5,10 @@
package libp2p
import (
- "errors"
"fmt"
"net"
"strings"
- libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
)
@@ -40,16 +38,7 @@ func newStaticAddressResolver(addr string, lookupIP func(host string) ([]net.IP,
}
func (r *staticAddressResolver) Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error) {
- observableAddrInfo, err := libp2ppeer.AddrInfoFromP2pAddr(observedAddress)
- if err != nil {
- return nil, err
- }
-
- if len(observableAddrInfo.Addrs) < 1 {
- return nil, errors.New("invalid observed address")
- }
-
- observedAddrSplit := strings.Split(observableAddrInfo.Addrs[0].String(), "/")
+ observedAddrSplit := strings.Split(observedAddress.String(), "/")
// if address is not in a form of '/ipversion/ip/protocol/port/...` don't compare to addresses and return it
if len(observedAddrSplit) < 5 {
@@ -69,12 +58,55 @@ func (r *staticAddressResolver) Resolve(observedAddress ma.Multiaddr) (ma.Multia
} else {
port = observedAddrSplit[4]
}
- a, err := ma.NewMultiaddr(multiProto + "/" + observedAddrSplit[3] + "/" + port)
+
+ const ipv4 = "ip4"
+ const ipv6 = "ip6"
+
+ // replace the IP-derived subdomain of the SNI component's domain name
+ otherComponents := observedAddrSplit[5:]
+ ipSplit := strings.Split(multiProto, "/")
+ expectedIPSniHost := newIPSniHost(observedAddrSplit[2], observedAddrSplit[1] == ipv4)
+ if len(ipSplit) == 3 && (ipSplit[1] == ipv4 || ipSplit[1] == ipv6) {
+ for i, part := range otherComponents {
+ if part == "sni" && i+1 < len(otherComponents) {
+ ip := ipSplit[2]
+ isIPv4 := ipSplit[1] == ipv4
+
+ sniParts := strings.Split(otherComponents[i+1], ".")
+ if len(sniParts) > 0 {
+ if sniParts[0] != expectedIPSniHost {
+ continue
+ }
+ sniParts[0] = newIPSniHost(ip, isIPv4)
+ otherComponents[i+1] = strings.Join(sniParts, ".")
+ }
+ break // assume only one sni component
+ }
+ }
+ }
+
+ a, err := ma.NewMultiaddr(multiProto + "/" + observedAddrSplit[3] + "/" + port + "/" + strings.Join(otherComponents, "/"))
if err != nil {
return nil, err
}
- return buildUnderlayAddress(a, observableAddrInfo.ID)
+ return a, nil
+}
+
+func newIPSniHost(ip string, isIPv4 bool) string {
+ if isIPv4 {
+ return strings.ReplaceAll(ip, ".", "-")
+ }
+
+ // IPv6
+ newSniHost := strings.ReplaceAll(ip, ":", "-")
+ if newSniHost[0] == '-' {
+ newSniHost = "0" + newSniHost
+ }
+ if newSniHost[len(newSniHost)-1] == '-' {
+ newSniHost = newSniHost + "0"
+ }
+ return newSniHost
}
func getMultiProto(host string, lookupIP func(host string) ([]net.IP, error)) (string, error) {
diff --git a/pkg/p2p/libp2p/static_resolver_test.go b/pkg/p2p/libp2p/static_resolver_test.go
index c45cce0142f..8e9f022721d 100644
--- a/pkg/p2p/libp2p/static_resolver_test.go
+++ b/pkg/p2p/libp2p/static_resolver_test.go
@@ -27,6 +27,12 @@ func TestStaticAddressResolver(t *testing.T) {
observableAddress: "/ip4/127.0.0.1/tcp/7071/p2p/16Uiu2HAkyyGKpjBiCkVqCKoJa6RzzZw9Nr7hGogsMPcdad1KyMmd",
want: "/ip4/127.0.0.1/tcp/30123/p2p/16Uiu2HAkyyGKpjBiCkVqCKoJa6RzzZw9Nr7hGogsMPcdad1KyMmd",
},
+ {
+ name: "without p2p protocols",
+ natAddr: ":30123",
+ observableAddress: "/ip4/127.0.0.1/tcp/7071",
+ want: "/ip4/127.0.0.1/tcp/30123",
+ },
{
name: "replace ip v4",
natAddr: "192.168.1.34:",
@@ -81,10 +87,38 @@ func TestStaticAddressResolver(t *testing.T) {
observableAddress: "/ip4/127.0.0.1/tcp/7071/p2p/16Uiu2HAkyyGKpjBiCkVqCKoJa6RzzZw9Nr7hGogsMPcdad1KyMmd",
want: "/dns/ipv4and6.com/tcp/30777/p2p/16Uiu2HAkyyGKpjBiCkVqCKoJa6RzzZw9Nr7hGogsMPcdad1KyMmd",
},
+ {
+ name: "replace ip and port with complex multiaddr",
+ natAddr: "192.168.1.34:30777",
+ observableAddress: "/ip4/10.233.99.40/tcp/1635/tls/sni/*.libp2p.direct/ws/p2p/QmWbXocGMpfa8zApx9kCNwfmc35bbRJv136bdtuQjbR4wL",
+ want: "/ip4/192.168.1.34/tcp/30777/tls/sni/*.libp2p.direct/ws/p2p/QmWbXocGMpfa8zApx9kCNwfmc35bbRJv136bdtuQjbR4wL",
+ },
+ {
+ name: "replace ip and port with complex multiaddr without p2p protocol",
+ natAddr: "192.168.1.34:30777",
+ observableAddress: "/ip4/10.233.99.40/tcp/1635/tls/sni/*.libp2p.direct/ws",
+ want: "/ip4/192.168.1.34/tcp/30777/tls/sni/*.libp2p.direct/ws",
+ },
+ {
+ name: "replace ip and port with complex multiaddr without p2p protocol and with full domain",
+ natAddr: "192.168.1.34:30777",
+ observableAddress: "/ip4/10.233.99.40/tcp/1635/tls/sni/10-233-99-40.k2k4r8nnyi7fa2p2t2fjfam2vdyk5esk1iwlfkdz1yezu56o0o0b966w.libp2p.direct/ws",
+ want: "/ip4/192.168.1.34/tcp/30777/tls/sni/192-168-1-34.k2k4r8nnyi7fa2p2t2fjfam2vdyk5esk1iwlfkdz1yezu56o0o0b966w.libp2p.direct/ws",
+ },
+ {
+ name: "replace ip v6 and port with complex multiaddr without p2p protocol and with full domain",
+ natAddr: "[::1]:30777",
+ observableAddress: "/ip4/10.233.99.40/tcp/1635/tls/sni/10-233-99-40.k2k4r8nnyi7fa2p2t2fjfam2vdyk5esk1iwlfkdz1yezu56o0o0b966w.libp2p.direct/ws",
+ want: "/ip6/::1/tcp/30777/tls/sni/0--1.k2k4r8nnyi7fa2p2t2fjfam2vdyk5esk1iwlfkdz1yezu56o0o0b966w.libp2p.direct/ws",
+ },
+ {
+ name: "do not change arbitrary sni domain",
+ natAddr: "[::1]:30777",
+ observableAddress: "/ip4/10.233.99.40/tcp/1635/tls/sni/some-node.ethswarm.org/ws",
+ want: "/ip6/::1/tcp/30777/tls/sni/some-node.ethswarm.org/ws",
+ },
} {
t.Run(tc.name, func(t *testing.T) {
- t.Parallel()
-
r, err := libp2p.NewStaticAddressResolver(tc.natAddr, func(host string) ([]net.IP, error) {
hosts := map[string][]net.IP{
"ipv4.com": {
diff --git a/pkg/p2p/libp2p/tracing_test.go b/pkg/p2p/libp2p/tracing_test.go
index 7e02f8e2d7c..533a49088ec 100644
--- a/pkg/p2p/libp2p/tracing_test.go
+++ b/pkg/p2p/libp2p/tracing_test.go
@@ -58,15 +58,13 @@ func TestTracing(t *testing.T) {
addr := serviceUnderlayAddress(t, s1)
- connectContext, connectCancel := context.WithCancel(context.Background())
- defer connectCancel()
+ connectContext := t.Context()
if _, err := s2.Connect(connectContext, addr); err != nil {
t.Fatal(err)
}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := t.Context()
span, _, ctx := tracer2.StartSpanFromContext(ctx, "test-p2p-client", nil)
defer span.Finish()
diff --git a/pkg/p2p/libp2p/upnp.go b/pkg/p2p/libp2p/upnp.go
deleted file mode 100644
index e0dcc3f577e..00000000000
--- a/pkg/p2p/libp2p/upnp.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package libp2p
-
-import (
- "errors"
- "strings"
-
- "github.com/libp2p/go-libp2p/core/host"
- libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
- ma "github.com/multiformats/go-multiaddr"
-)
-
-type UpnpAddressResolver struct {
- host host.Host
-}
-
-// Resolve checks if there is a possible better advertisable underlay then the provided observed address.
-// In some NAT situations, for example in the case when nodes are behind upnp, observer might send the observed address with a wrong port.
-// In this case, observed address is compared to addresses provided by host, and if there is a same address but with different port, that one is used as advertisable address instead of provided observed one.
-// TODO: this is a quickfix and it will be improved in the future
-func (r *UpnpAddressResolver) Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error) {
- observableAddrInfo, err := libp2ppeer.AddrInfoFromP2pAddr(observedAddress)
- if err != nil {
- return nil, err
- }
-
- if len(observableAddrInfo.Addrs) < 1 {
- return nil, errors.New("invalid observed address")
- }
-
- observedAddrSplit := strings.Split(observableAddrInfo.Addrs[0].String(), "/")
-
- // if address is not in a form of '/ipversion/ip/protocol/port/...` don't compare to addresses and return it
- if len(observedAddrSplit) < 5 {
- return observedAddress, nil
- }
-
- observedAddressPort := observedAddrSplit[4]
-
- // observervedAddressShort is an obaserved address without port
- observervedAddressShort := strings.Join(append(observedAddrSplit[:4], observedAddrSplit[5:]...), "/")
-
- for _, a := range r.host.Addrs() {
- asplit := strings.Split(a.String(), "/")
- if len(asplit) != len(observedAddrSplit) {
- continue
- }
-
- aport := asplit[4]
- if strings.Join(append(asplit[:4], asplit[5:]...), "/") != observervedAddressShort {
- continue
- }
-
- if aport != observedAddressPort {
- aaddress, err := buildUnderlayAddress(a, observableAddrInfo.ID)
- if err != nil {
- continue
- }
-
- return aaddress, nil
- }
- }
-
- return observedAddress, nil
-}
diff --git a/pkg/p2p/libp2p/version_test.go b/pkg/p2p/libp2p/version_test.go
new file mode 100644
index 00000000000..15e0ce3eba6
--- /dev/null
+++ b/pkg/p2p/libp2p/version_test.go
@@ -0,0 +1,234 @@
+// Copyright 2026 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package libp2p
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ethersphere/bee/v2/pkg/crypto"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/statestore/mock"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+func TestBee260BackwardCompatibility(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ userAgent string
+ want bool
+ }{
+ // Versions < 2.7.0 should require backward compatibility
+ {
+ name: "version 2.6.0",
+ userAgent: "bee/2.6.0 go1.22.0 linux/amd64",
+ want: true,
+ },
+ {
+ name: "version 2.6.5",
+ userAgent: "bee/2.6.5 go1.22.0 linux/amd64",
+ want: true,
+ },
+ {
+ name: "version 2.5.0",
+ userAgent: "bee/2.5.0 go1.21.0 linux/amd64",
+ want: true,
+ },
+ {
+ name: "version 2.6.0-beta1",
+ userAgent: "bee/2.6.0-beta1 go1.22.0 linux/amd64",
+ want: true,
+ },
+ // Versions >= 2.7.0 should NOT require backward compatibility
+ {
+ name: "version 2.7.0",
+ userAgent: "bee/2.7.0 go1.23.0 linux/amd64",
+ want: false,
+ },
+ {
+ name: "version 2.8.0",
+ userAgent: "bee/2.8.0 go1.23.0 linux/amd64",
+ want: false,
+ },
+ {
+ name: "version 3.0.0",
+ userAgent: "bee/3.0.0 go1.25.0 linux/amd64",
+ want: false,
+ },
+ // Pre-release versions >= 2.7.0 should NOT require backward compatibility
+ // This is the critical fix: 2.7.0-rcX should be treated as >= 2.7.0
+ {
+ name: "version 2.7.0-rc1",
+ userAgent: "bee/2.7.0-rc1 go1.23.0 linux/amd64",
+ want: false,
+ },
+ {
+ name: "version 2.7.0-rc12",
+ userAgent: "bee/2.7.0-rc12-b39629d5-dirty go1.25.6 linux/amd64",
+ want: false,
+ },
+ {
+ name: "version 2.7.0-beta1",
+ userAgent: "bee/2.7.0-beta1 go1.23.0 linux/amd64",
+ want: false,
+ },
+ {
+ name: "version 2.8.0-rc1",
+ userAgent: "bee/2.8.0-rc1 go1.24.0 linux/amd64",
+ want: false,
+ },
+ {
+ name: "version 2.9.0-beta1",
+ userAgent: "bee/2.9.0-beta1 go1.24.0 linux/amd64",
+ want: false,
+ },
+ // Edge cases that should return false (not requiring backward compat)
+ {
+ name: "empty user agent",
+ userAgent: "",
+ want: false,
+ },
+ {
+ name: "malformed user agent missing space",
+ userAgent: "bee/2.6.0",
+ want: false,
+ },
+ {
+ name: "non-bee user agent",
+ userAgent: "other/1.0.0 go1.22.0 linux/amd64",
+ want: false,
+ },
+ {
+ name: "invalid version format",
+ userAgent: "bee/invalid go1.22.0 linux/amd64",
+ want: false,
+ },
+ {
+ name: "default libp2p user agent",
+ userAgent: "github.com/libp2p/go-libp2p",
+ want: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ // Create a service with minimal configuration
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ swarmKey, err := crypto.GenerateSecp256k1Key()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ overlay := swarm.RandAddress(t)
+ addr := ":0"
+ networkID := uint64(1)
+
+ statestore := mock.NewStateStore()
+ defer statestore.Close()
+
+ s, err := New(ctx, crypto.NewDefaultSigner(swarmKey), networkID, overlay, addr, nil, statestore, nil, log.Noop, nil, Options{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer s.Close()
+
+ // Create a random test peer ID - we only need any valid libp2p peer ID
+ // The peerstore lookup will be mocked by setting the AgentVersion directly
+ libp2pPeerID, err := libp2ppeer.Decode("16Uiu2HAm3g4hXfCWTDhPBq3KkqpV3wGkPVgMJY3Jt8gGTYWiTWNZ")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Set the user agent in the peerstore if provided
+ if tc.userAgent != "" {
+ if err := s.host.Peerstore().Put(libp2pPeerID, "AgentVersion", tc.userAgent); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Test the backward compatibility check
+ got := s.bee260BackwardCompatibility(libp2pPeerID)
+ if got != tc.want {
+ t.Errorf("bee260BackwardCompatibility() = %v, want %v (userAgent: %q)", got, tc.want, tc.userAgent)
+ }
+ })
+ }
+}
+
+func TestBee260Cache(t *testing.T) {
+ t.Parallel()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ swarmKey, err := crypto.GenerateSecp256k1Key()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ overlay := swarm.RandAddress(t)
+ addr := ":0"
+ networkID := uint64(1)
+
+ statestore := mock.NewStateStore()
+ defer statestore.Close()
+
+ s, err := New(ctx, crypto.NewDefaultSigner(swarmKey), networkID, overlay, addr, nil, statestore, nil, log.Noop, nil, Options{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer s.Close()
+
+ libp2pPeerID, err := libp2ppeer.Decode("16Uiu2HAm3g4hXfCWTDhPBq3KkqpV3wGkPVgMJY3Jt8gGTYWiTWNZ")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // 1. Set user agent to 2.6.0 (compat = true)
+ if err := s.host.Peerstore().Put(libp2pPeerID, "AgentVersion", "bee/2.6.0 go1.22.0 linux/amd64"); err != nil {
+ t.Fatal(err)
+ }
+
+ // 2. First call should calculate and cache it
+ if !s.bee260BackwardCompatibility(libp2pPeerID) {
+ t.Fatal("expected true for 2.6.0")
+ }
+
+ // 3. Verify it's in the cache
+ compat, found := s.peers.bee260(libp2pPeerID)
+ if !found {
+ t.Fatal("expected value to be in cache")
+ }
+ if !compat {
+ t.Fatal("expected cached value to be true")
+ }
+
+ // 4. Change user agent in peerstore to 2.7.0 (compat = false)
+ // If caching works, bee260BackwardCompatibility should still return true
+ if err := s.host.Peerstore().Put(libp2pPeerID, "AgentVersion", "bee/2.7.0 go1.23.0 linux/amd64"); err != nil {
+ t.Fatal(err)
+ }
+
+ if !s.bee260BackwardCompatibility(libp2pPeerID) {
+ t.Fatal("expected true (cached value) even if peerstore changed")
+ }
+
+ // 5. Clear cache (manually for testing)
+ s.peers.mu.Lock()
+ delete(s.peers.bee260Compatibility, libp2pPeerID)
+ s.peers.mu.Unlock()
+
+ // 6. Now it should re-calculate and return false for 2.7.0
+ if s.bee260BackwardCompatibility(libp2pPeerID) {
+ t.Fatal("expected false for 2.7.0 after cache clear")
+ }
+}
diff --git a/pkg/p2p/mock/mock.go b/pkg/p2p/mock/mock.go
index b5fc33f5373..59197b30ba0 100644
--- a/pkg/p2p/mock/mock.go
+++ b/pkg/p2p/mock/mock.go
@@ -18,7 +18,7 @@ import (
// Service is the mock of a P2P Service
type Service struct {
addProtocolFunc func(p2p.ProtocolSpec) error
- connectFunc func(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error)
+ connectFunc func(ctx context.Context, addr []ma.Multiaddr) (address *bzz.Address, err error)
disconnectFunc func(overlay swarm.Address, reason string) error
peersFunc func() []p2p.Peer
blocklistedPeersFunc func() ([]p2p.BlockListedPeer, error)
@@ -38,7 +38,7 @@ func WithAddProtocolFunc(f func(p2p.ProtocolSpec) error) Option {
}
// WithConnectFunc sets the mock implementation of the Connect function
-func WithConnectFunc(f func(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error)) Option {
+func WithConnectFunc(f func(ctx context.Context, addr []ma.Multiaddr) (address *bzz.Address, err error)) Option {
return optionFunc(func(s *Service) {
s.connectFunc = f
})
@@ -108,7 +108,7 @@ func (s *Service) AddProtocol(spec p2p.ProtocolSpec) error {
return s.addProtocolFunc(spec)
}
-func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error) {
+func (s *Service) Connect(ctx context.Context, addr []ma.Multiaddr) (address *bzz.Address, err error) {
if s.connectFunc == nil {
return nil, errors.New("function Connect not configured")
}
diff --git a/pkg/p2p/p2p.go b/pkg/p2p/p2p.go
index a85b3d9d04b..ca725f73dd2 100644
--- a/pkg/p2p/p2p.go
+++ b/pkg/p2p/p2p.go
@@ -61,7 +61,7 @@ var ErrNetworkUnavailable = errors.New("network unavailable")
type Service interface {
AddProtocol(ProtocolSpec) error
// Connect to a peer but do not notify topology about the established connection.
- Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error)
+ Connect(ctx context.Context, addrs []ma.Multiaddr) (address *bzz.Address, err error)
Disconnecter
Peers() []Peer
Blocklisted(swarm.Address) (bool, error)
@@ -141,6 +141,12 @@ type Streamer interface {
NewStream(ctx context.Context, address swarm.Address, h Headers, protocol, version, stream string) (Stream, error)
}
+// Bee260CompatibilityStreamer is able to create a new Stream and check if a peer is running Bee 2.6.0.
+type Bee260CompatibilityStreamer interface {
+ NewStream(ctx context.Context, address swarm.Address, h Headers, protocol, version, stream string) (Stream, error)
+ IsBee260(address swarm.Address) bool
+}
+
type StreamerDisconnecter interface {
Streamer
Disconnecter
@@ -237,3 +243,18 @@ func (e *ChunkDeliveryError) Error() string {
func NewChunkDeliveryError(msg string) error {
return &ChunkDeliveryError{msg: msg}
}
+
+// FilterBee260CompatibleUnderlays selects a single underlay to pass if
+// bee260compatibility is true. Otherwise it passes the unmodified underlays
+// slice. This function can be safely removed when bee version 2.6.0 is
+// deprecated.
+func FilterBee260CompatibleUnderlays(bee260compatibility bool, underlays []ma.Multiaddr) []ma.Multiaddr {
+ if !bee260compatibility {
+ return underlays
+ }
+ underlay := bzz.SelectBestAdvertisedAddress(underlays, nil)
+ if underlay == nil {
+ return underlays
+ }
+ return []ma.Multiaddr{underlay}
+}
diff --git a/pkg/p2p/protobuf/protobuf_test.go b/pkg/p2p/protobuf/protobuf_test.go
index 3a1edf30bf5..f70e3754161 100644
--- a/pkg/p2p/protobuf/protobuf_test.go
+++ b/pkg/p2p/protobuf/protobuf_test.go
@@ -49,7 +49,7 @@ func TestReader_ReadMsg(t *testing.T) {
r := tc.readerFunc()
var msg pb.Message
- for i := 0; i < len(messages); i++ {
+ for i := range messages {
err := r.ReadMsg(&msg)
if i == len(messages) {
if !errors.Is(err, io.EOF) {
@@ -105,7 +105,7 @@ func TestReader_timeout(t *testing.T) {
r := tc.readerFunc()
var msg pb.Message
- for i := 0; i < len(messages); i++ {
+ for i := range messages {
var timeout time.Duration
if i == 0 {
timeout = 1000 * time.Millisecond
diff --git a/pkg/p2p/streamtest/streamtest.go b/pkg/p2p/streamtest/streamtest.go
index ae312624149..be491c3d5eb 100644
--- a/pkg/p2p/streamtest/streamtest.go
+++ b/pkg/p2p/streamtest/streamtest.go
@@ -38,6 +38,7 @@ type Recorder struct {
streamErr func(swarm.Address, string, string, string) error
pingErr func(ma.Multiaddr) (time.Duration, error)
protocolsWithPeers map[string]p2p.ProtocolSpec
+ messageLatency time.Duration
}
func WithProtocols(protocols ...p2p.ProtocolSpec) Option {
@@ -82,6 +83,12 @@ func WithPingErr(pingErr func(ma.Multiaddr) (time.Duration, error)) Option {
})
}
+func WithMessageLatency(latency time.Duration) Option {
+ return optionFunc(func(r *Recorder) {
+ r.messageLatency = latency
+ })
+}
+
func New(opts ...Option) *Recorder {
r := &Recorder{
records: make(map[string][]*Record),
@@ -115,8 +122,8 @@ func (r *Recorder) NewStream(ctx context.Context, addr swarm.Address, h p2p.Head
}
}
- recordIn := newRecord()
- recordOut := newRecord()
+ recordIn := newRecord(r.messageLatency)
+ recordOut := newRecord(r.messageLatency)
streamOut := newStream(recordIn, recordOut)
streamIn := newStream(recordOut, recordIn)
@@ -216,6 +223,12 @@ func (r *Recorder) WaitRecords(t *testing.T, addr swarm.Address, proto, version,
return recs
}
+// IsBee260 implements p2p.Bee260CompatibilityStreamer interface.
+// It always returns false.
+func (r *Recorder) IsBee260(overlay swarm.Address) bool {
+ return false
+}
+
type Record struct {
in *record
out *record
@@ -328,16 +341,20 @@ type record struct {
c int
lock sync.Mutex
dataSigC chan struct{}
+ latency time.Duration
closed bool
}
-func newRecord() *record {
+func newRecord(latency time.Duration) *record {
return &record{
dataSigC: make(chan struct{}, 16),
+ latency: latency,
}
}
func (r *record) Read(p []byte) (n int, err error) {
+ defer time.Sleep(r.latency)
+
for r.c == r.bytesSize() {
_, ok := <-r.dataSigC
if !ok {
@@ -348,10 +365,7 @@ func (r *record) Read(p []byte) (n int, err error) {
r.lock.Lock()
defer r.lock.Unlock()
- end := r.c + len(p)
- if end > len(r.b) {
- end = len(r.b)
- }
+ end := min(r.c+len(p), len(r.b))
n = copy(p, r.b[r.c:end])
r.c += n
@@ -359,6 +373,8 @@ func (r *record) Read(p []byte) (n int, err error) {
}
func (r *record) Write(p []byte) (int, error) {
+ defer time.Sleep(r.latency)
+
r.lock.Lock()
defer r.lock.Unlock()
diff --git a/pkg/p2p/streamtest/streamtest_test.go b/pkg/p2p/streamtest/streamtest_test.go
index 0b26d2da7ce..51b0d903ef2 100644
--- a/pkg/p2p/streamtest/streamtest_test.go
+++ b/pkg/p2p/streamtest/streamtest_test.go
@@ -12,6 +12,7 @@ import (
"io"
"strings"
"testing"
+ "testing/synctest"
"time"
"github.com/ethersphere/bee/v2/pkg/p2p"
@@ -785,6 +786,50 @@ func TestRecorder_ping(t *testing.T) {
}
}
+func TestRecorder_WithMessageLatency(t *testing.T) {
+ synctest.Test(t, func(t *testing.T) {
+ latency := 200 * time.Millisecond
+ recorder := streamtest.New(
+ streamtest.WithMessageLatency(latency),
+ streamtest.WithProtocols(
+ newTestProtocol(func(_ context.Context, peer p2p.Peer, stream p2p.Stream) error {
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
+ if _, err := rw.ReadString('\n'); err != nil {
+ return err
+ }
+ if _, err := rw.WriteString("pong\n"); err != nil {
+ return err
+ }
+ return rw.Flush()
+ }),
+ ),
+ )
+
+ stream, err := recorder.NewStream(context.Background(), swarm.ZeroAddress, nil, testProtocolName, testProtocolVersion, testStreamName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer stream.Close()
+
+ start := time.Now()
+ if _, err := stream.Write([]byte("ping\n")); err != nil {
+ t.Fatal(err)
+ }
+ if duration := time.Since(start); duration < latency {
+ t.Errorf("write took %v, want >= %v", duration, latency)
+ }
+
+ start = time.Now()
+ rw := bufio.NewReader(stream)
+ if _, err := rw.ReadString('\n'); err != nil {
+ t.Fatal(err)
+ }
+ if duration := time.Since(start); duration < latency {
+ t.Errorf("read took %v, want >= %v", duration, latency)
+ }
+ })
+}
+
const (
testProtocolName = "testing"
testProtocolVersion = "1.0.1"
@@ -813,7 +858,7 @@ func testRecords(t *testing.T, records []*streamtest.Record, want [][2]string, w
t.Fatalf("got %v records, want %v", lr, lw)
}
- for i := 0; i < lr; i++ {
+ for i := range lr {
record := records[i]
if err := record.Err(); !errors.Is(err, wantErr) {
diff --git a/pkg/pingpong/pingpong_test.go b/pkg/pingpong/pingpong_test.go
index 6e0298b7cb9..99c71a6dce6 100644
--- a/pkg/pingpong/pingpong_test.go
+++ b/pkg/pingpong/pingpong_test.go
@@ -8,14 +8,13 @@ import (
"bytes"
"context"
"fmt"
- "runtime"
"testing"
+ "testing/synctest"
"time"
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/ethersphere/bee/v2/pkg/log"
- "github.com/ethersphere/bee/v2/pkg/p2p"
"github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
"github.com/ethersphere/bee/v2/pkg/p2p/streamtest"
"github.com/ethersphere/bee/v2/pkg/pingpong"
@@ -23,91 +22,85 @@ import (
)
func TestPing(t *testing.T) {
- t.Parallel()
+ synctest.Test(t, func(t *testing.T) {
+ logger := log.Noop
- logger := log.Noop
+ // create a pingpong server that handles the incoming stream
+ server := pingpong.New(nil, logger, nil)
- // create a pingpong server that handles the incoming stream
- server := pingpong.New(nil, logger, nil)
+ messageLatency := 200 * time.Millisecond
- // setup the stream recorder to record stream data
- recorder := streamtest.New(
- streamtest.WithProtocols(server.Protocol()),
- streamtest.WithMiddlewares(func(f p2p.HandlerFunc) p2p.HandlerFunc {
- if runtime.GOOS == "windows" {
- // windows has a bit lower time resolution
- // so, slow down the handler with a middleware
- // not to get 0s for rtt value
- time.Sleep(100 * time.Millisecond)
- }
- return f
- }),
- )
+ // setup the stream recorder to record stream data
+ recorder := streamtest.New(
+ streamtest.WithProtocols(server.Protocol()),
+ streamtest.WithMessageLatency(messageLatency),
+ )
- // create a pingpong client that will do pinging
- client := pingpong.New(recorder, logger, nil)
+ // create a pingpong client that will do pinging
+ client := pingpong.New(recorder, logger, nil)
- // ping
- addr := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c")
- greetings := []string{"hey", "there", "fella"}
- rtt, err := client.Ping(context.Background(), addr, greetings...)
- if err != nil {
- t.Fatal(err)
- }
+ // ping
+ addr := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c")
+ greetings := []string{"hey", "there", "fella"}
+ rtt, err := client.Ping(context.Background(), addr, greetings...)
+ if err != nil {
+ t.Fatal(err)
+ }
- // check that RTT is a sane value
- if rtt <= 0 {
- t.Errorf("invalid RTT value %v", rtt)
- }
+ // check that RTT is the sum of all message latencies
+ if rtt != time.Duration(2*len(greetings))*messageLatency {
+ t.Errorf("invalid RTT value %v", rtt)
+ }
- // get a record for this stream
- records, err := recorder.Records(addr, "pingpong", "1.0.0", "pingpong")
- if err != nil {
- t.Fatal(err)
- }
- if l := len(records); l != 1 {
- t.Fatalf("got %v records, want %v", l, 1)
- }
- record := records[0]
+ // get a record for this stream
+ records, err := recorder.Records(addr, "pingpong", "1.0.0", "pingpong")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if l := len(records); l != 1 {
+ t.Fatalf("got %v records, want %v", l, 1)
+ }
+ record := records[0]
- // validate received ping greetings from the client
- wantGreetings := greetings
- messages, err := protobuf.ReadMessages(
- bytes.NewReader(record.In()),
- func() protobuf.Message { return new(pb.Ping) },
- )
- if err != nil {
- t.Fatal(err)
- }
- gotGreetings := make([]string, 0, len(messages))
- for _, m := range messages {
- gotGreetings = append(gotGreetings, m.(*pb.Ping).Greeting)
- }
- if fmt.Sprint(gotGreetings) != fmt.Sprint(wantGreetings) {
- t.Errorf("got greetings %v, want %v", gotGreetings, wantGreetings)
- }
+ // validate received ping greetings from the client
+ wantGreetings := greetings
+ messages, err := protobuf.ReadMessages(
+ bytes.NewReader(record.In()),
+ func() protobuf.Message { return new(pb.Ping) },
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ gotGreetings := make([]string, 0, len(messages))
+ for _, m := range messages {
+ gotGreetings = append(gotGreetings, m.(*pb.Ping).Greeting)
+ }
+ if fmt.Sprint(gotGreetings) != fmt.Sprint(wantGreetings) {
+ t.Errorf("got greetings %v, want %v", gotGreetings, wantGreetings)
+ }
- // validate sent pong responses by handler
- wantResponses := make([]string, 0, len(greetings))
- for _, g := range greetings {
- wantResponses = append(wantResponses, "{"+g+"}")
- }
- messages, err = protobuf.ReadMessages(
- bytes.NewReader(record.Out()),
- func() protobuf.Message { return new(pb.Pong) },
- )
- if err != nil {
- t.Fatal(err)
- }
- gotResponses := make([]string, 0, len(messages))
- for _, m := range messages {
- gotResponses = append(gotResponses, m.(*pb.Pong).Response)
- }
- if fmt.Sprint(gotResponses) != fmt.Sprint(wantResponses) {
- t.Errorf("got responses %v, want %v", gotResponses, wantResponses)
- }
+ // validate sent pong responses by handler
+ wantResponses := make([]string, 0, len(greetings))
+ for _, g := range greetings {
+ wantResponses = append(wantResponses, "{"+g+"}")
+ }
+ messages, err = protobuf.ReadMessages(
+ bytes.NewReader(record.Out()),
+ func() protobuf.Message { return new(pb.Pong) },
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ gotResponses := make([]string, 0, len(messages))
+ for _, m := range messages {
+ gotResponses = append(gotResponses, m.(*pb.Pong).Response)
+ }
+ if fmt.Sprint(gotResponses) != fmt.Sprint(wantResponses) {
+ t.Errorf("got responses %v, want %v", gotResponses, wantResponses)
+ }
- if err := record.Err(); err != nil {
- t.Fatal(err)
- }
+ if err := record.Err(); err != nil {
+ t.Fatal(err)
+ }
+ })
}
diff --git a/pkg/postage/batchservice/batchservice.go b/pkg/postage/batchservice/batchservice.go
index 6ad76b0bed4..cf3382c552d 100644
--- a/pkg/postage/batchservice/batchservice.go
+++ b/pkg/postage/batchservice/batchservice.go
@@ -40,6 +40,8 @@ type batchService struct {
checksum hash.Hash // checksum hasher
resync bool
+
+ pendingChainState *postage.ChainState
}
type Interface interface {
@@ -95,15 +97,22 @@ func New(
}
}
- return &batchService{stateStore, storer, logger.WithName(loggerName).Register(), listener, owner, batchListener, sum, resync}, nil
+ return &batchService{stateStore: stateStore, storer: storer, logger: logger.WithName(loggerName).Register(), listener: listener, owner: owner, batchListener: batchListener, checksum: sum, resync: resync}, nil
+}
+
+func (svc *batchService) getChainState() *postage.ChainState {
+ if svc.pendingChainState != nil {
+ return svc.pendingChainState
+ }
+ return svc.storer.GetChainState()
}
// Create will create a new batch with the given ID, owner value and depth and
// stores it in the BatchedStore.
func (svc *batchService) Create(id, owner []byte, totalAmout, normalisedBalance *big.Int, depth, bucketDepth uint8, immutable bool, txHash common.Hash) error {
- // don't add batches which have value which equals total cumulative
+ // don't add batches which have value which equals total cumulative
// payout or that are going to expire already within the next couple of blocks
- val := big.NewInt(0).Add(svc.storer.GetChainState().TotalAmount, svc.storer.GetChainState().CurrentPrice)
+ val := big.NewInt(0).Add(svc.getChainState().TotalAmount, svc.getChainState().CurrentPrice)
if normalisedBalance.Cmp(val) <= 0 {
// don't do anything
return fmt.Errorf("batch service: batch %x: %w", id, ErrZeroValueBatch)
@@ -112,7 +121,7 @@ func (svc *batchService) Create(id, owner []byte, totalAmout, normalisedBalance
ID: id,
Owner: owner,
Value: normalisedBalance,
- Start: svc.storer.GetChainState().Block,
+ Start: svc.getChainState().Block,
Depth: depth,
BucketDepth: bucketDepth,
Immutable: immutable,
@@ -196,10 +205,13 @@ func (svc *batchService) UpdateDepth(id []byte, depth uint8, normalisedBalance *
// UpdatePrice implements the EventUpdater interface. It sets the current
// price from the chain in the service chain state.
func (svc *batchService) UpdatePrice(price *big.Int, txHash common.Hash) error {
- cs := svc.storer.GetChainState()
+ cs := svc.getChainState()
cs.CurrentPrice = price
- if err := svc.storer.PutChainState(cs); err != nil {
- return fmt.Errorf("put chain state: %w", err)
+
+ if svc.pendingChainState == nil {
+ if err := svc.storer.PutChainState(cs); err != nil {
+ return fmt.Errorf("put chain state: %w", err)
+ }
}
sum, err := svc.updateChecksum(txHash)
@@ -212,7 +224,7 @@ func (svc *batchService) UpdatePrice(price *big.Int, txHash common.Hash) error {
}
func (svc *batchService) UpdateBlockNumber(blockNumber uint64) error {
- cs := svc.storer.GetChainState()
+ cs := svc.getChainState()
if blockNumber == cs.Block {
return nil
}
@@ -223,30 +235,40 @@ func (svc *batchService) UpdateBlockNumber(blockNumber uint64) error {
cs.TotalAmount.Add(cs.TotalAmount, diff.Mul(diff, cs.CurrentPrice))
cs.Block = blockNumber
- if err := svc.storer.PutChainState(cs); err != nil {
- return fmt.Errorf("put chain state: %w", err)
+
+ if svc.pendingChainState == nil {
+ if err := svc.storer.PutChainState(cs); err != nil {
+ return fmt.Errorf("put chain state: %w", err)
+ }
}
svc.logger.Debug("block height updated", "new_block", blockNumber)
return nil
}
func (svc *batchService) TransactionStart() error {
+ svc.pendingChainState = svc.storer.GetChainState()
return svc.stateStore.Put(dirtyDBKey, true)
}
func (svc *batchService) TransactionEnd() error {
+ if svc.pendingChainState != nil {
+ if err := svc.storer.PutChainState(svc.pendingChainState); err != nil {
+ return fmt.Errorf("put chain state: %w", err)
+ }
+ svc.pendingChainState = nil
+ }
return svc.stateStore.Delete(dirtyDBKey)
}
var ErrInterruped = errors.New("postage sync interrupted")
-func (svc *batchService) Start(ctx context.Context, startBlock uint64, initState *postage.ChainSnapshot) (err error) {
+func (svc *batchService) Start(ctx context.Context, startBlock uint64) (err error) {
dirty := false
err = svc.stateStore.Get(dirtyDBKey, &dirty)
if err != nil && !errors.Is(err, storage.ErrNotFound) {
return err
}
- if dirty || svc.resync || initState != nil {
+ if dirty || svc.resync {
if dirty {
svc.logger.Warning("batch service: dirty shutdown detected, resetting batch store")
@@ -268,11 +290,7 @@ func (svc *batchService) Start(ctx context.Context, startBlock uint64, initState
startBlock = cs.Block
}
- if initState != nil && initState.LastBlockNumber > startBlock {
- startBlock = initState.LastBlockNumber
- }
-
- syncedChan := svc.listener.Listen(ctx, startBlock+1, svc, initState)
+ syncedChan := svc.listener.Listen(ctx, startBlock+1, svc)
return <-syncedChan
}
diff --git a/pkg/postage/batchservice/batchservice_test.go b/pkg/postage/batchservice/batchservice_test.go
index 9500acaba28..2a9ef622c10 100644
--- a/pkg/postage/batchservice/batchservice_test.go
+++ b/pkg/postage/batchservice/batchservice_test.go
@@ -32,7 +32,7 @@ var (
type mockListener struct {
}
-func (*mockListener) Listen(ctx context.Context, from uint64, updater postage.EventUpdater, _ *postage.ChainSnapshot) <-chan error {
+func (*mockListener) Listen(ctx context.Context, from uint64, updater postage.EventUpdater) <-chan error {
c := make(chan error, 1)
c <- nil
return c
@@ -514,7 +514,7 @@ func TestTransactionOk(t *testing.T) {
t.Parallel()
svc, store, s := newTestStoreAndService(t)
- if err := svc.Start(context.Background(), 10, nil); err != nil {
+ if err := svc.Start(context.Background(), 10); err != nil {
t.Fatal(err)
}
@@ -530,7 +530,7 @@ func TestTransactionOk(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := svc2.Start(context.Background(), 10, nil); err != nil {
+ if err := svc2.Start(context.Background(), 10); err != nil {
t.Fatal(err)
}
@@ -543,7 +543,7 @@ func TestTransactionError(t *testing.T) {
t.Parallel()
svc, store, s := newTestStoreAndService(t)
- if err := svc.Start(context.Background(), 10, nil); err != nil {
+ if err := svc.Start(context.Background(), 10); err != nil {
t.Fatal(err)
}
@@ -555,7 +555,7 @@ func TestTransactionError(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := svc2.Start(context.Background(), 10, nil); err != nil {
+ if err := svc2.Start(context.Background(), 10); err != nil {
t.Fatal(err)
}
diff --git a/pkg/postage/batchstore/mock/store_test.go b/pkg/postage/batchstore/mock/store_test.go
index d376c359819..8100da0dac7 100644
--- a/pkg/postage/batchstore/mock/store_test.go
+++ b/pkg/postage/batchstore/mock/store_test.go
@@ -32,7 +32,7 @@ func TestBatchStore(t *testing.T) {
}
// Update should return error after a number of tries:
- for i := 0; i < testCnt; i++ {
+ for range testCnt {
if err := batchStore.Update(testBatch, big.NewInt(0), 0); err != nil {
t.Fatal(err)
}
@@ -45,7 +45,7 @@ func TestBatchStore(t *testing.T) {
if _, err := batchStore.Get(postagetesting.MustNewID()); err == nil {
t.Fatal("expected error")
}
- for i := 0; i < testCnt-1; i++ {
+ for range testCnt - 1 {
if _, err := batchStore.Get(testBatch.ID); err != nil {
t.Fatal(err)
}
@@ -67,7 +67,7 @@ func TestBatchStorePutChainState(t *testing.T) {
)
// PutChainState should return an error after a number of tries:
- for i := 0; i < testCnt; i++ {
+ for range testCnt {
if err := batchStore.PutChainState(testChainState); err != nil {
t.Fatal(err)
}
diff --git a/pkg/postage/batchstore/store_test.go b/pkg/postage/batchstore/store_test.go
index 603dd24f398..975c93f3e18 100644
--- a/pkg/postage/batchstore/store_test.go
+++ b/pkg/postage/batchstore/store_test.go
@@ -602,7 +602,7 @@ func addBatch(t *testing.T, s postage.Storer, depth uint8, value int) *postage.B
return batch
}
-func stateStoreGet(t *testing.T, st storage.StateStorer, k string, v interface{}) {
+func stateStoreGet(t *testing.T, st storage.StateStorer, k string, v any) {
t.Helper()
if err := st.Get(k, v); err != nil {
@@ -610,7 +610,7 @@ func stateStoreGet(t *testing.T, st storage.StateStorer, k string, v interface{}
}
}
-func stateStorePut(t *testing.T, st storage.StateStorer, k string, v interface{}) {
+func stateStorePut(t *testing.T, st storage.StateStorer, k string, v any) {
t.Helper()
if err := st.Put(k, v); err != nil {
diff --git a/pkg/postage/interface.go b/pkg/postage/interface.go
index 2288fb9fcbf..0f8c9f976d6 100644
--- a/pkg/postage/interface.go
+++ b/pkg/postage/interface.go
@@ -10,7 +10,6 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
)
// EventUpdater interface definitions reflect the updates triggered by events
@@ -21,23 +20,12 @@ type EventUpdater interface {
UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int, txHash common.Hash) error
UpdatePrice(price *big.Int, txHash common.Hash) error
UpdateBlockNumber(blockNumber uint64) error
- Start(ctx context.Context, startBlock uint64, initState *ChainSnapshot) error
+ Start(ctx context.Context, startBlock uint64) error
TransactionStart() error
TransactionEnd() error
}
-// ChainSnapshot represents the snapshot of all the postage events between the
-// FirstBlockNumber and LastBlockNumber. The timestamp stores the time at which the
-// snapshot was generated. This snapshot can be used to sync the postage package
-// to prevent large no. of chain backend calls.
-type ChainSnapshot struct {
- Events []types.Log `json:"events"`
- LastBlockNumber uint64 `json:"lastBlockNumber"`
- FirstBlockNumber uint64 `json:"firstBlockNumber"`
- Timestamp int64 `json:"timestamp"`
-}
-
// Storer represents the persistence layer for batches
// on the current (highest available) block.
type Storer interface {
@@ -90,7 +78,7 @@ type ChainStateGetter interface {
// Listener provides a blockchain event iterator.
type Listener interface {
io.Closer
- Listen(ctx context.Context, from uint64, updater EventUpdater, initState *ChainSnapshot) <-chan error
+ Listen(ctx context.Context, from uint64, updater EventUpdater) <-chan error
}
type BatchEventListener interface {
diff --git a/pkg/postage/listener/listener.go b/pkg/postage/listener/listener.go
index d9d52b2c5a1..53feb3a5299 100644
--- a/pkg/postage/listener/listener.go
+++ b/pkg/postage/listener/listener.go
@@ -29,6 +29,7 @@ const loggerName = "listener"
const (
blockPage = 5000 // how many blocks to sync every time we page
+ blockPageSnapshot = 50000 // how many blocks to sync every time from snapshot
tailSize = 4 // how many blocks to tail from the tip of the chain
defaultBatchFactor = uint64(5) // minimal number of blocks to sync at once
)
@@ -186,7 +187,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error
}
}
-func (l *listener) Listen(ctx context.Context, from uint64, updater postage.EventUpdater, initState *postage.ChainSnapshot) <-chan error {
+func (l *listener) Listen(ctx context.Context, from uint64, updater postage.EventUpdater) <-chan error {
ctx, cancel := context.WithCancel(ctx)
go func() {
<-l.quit
@@ -226,13 +227,6 @@ func (l *listener) Listen(ctx context.Context, from uint64, updater postage.Even
return nil
}
- if initState != nil {
- err := processEvents(initState.Events, initState.LastBlockNumber+1)
- if err != nil {
- l.logger.Error(err, "failed bootstrapping from initial state")
- }
- }
-
batchFactor, err := strconv.ParseUint(batchFactorOverridePublic, 10, 64)
if err != nil {
l.logger.Warning("batch factor conversation failed", "batch_factor", batchFactor, "error", err)
@@ -241,6 +235,15 @@ func (l *listener) Listen(ctx context.Context, from uint64, updater postage.Even
l.logger.Debug("batch factor", "value", batchFactor)
+ // Type assertion to detect if backend is SnapshotLogFilterer
+ pageSize := uint64(blockPage)
+ if _, isSnapshot := l.ev.(interface{ GetBatchSnapshot() []byte }); isSnapshot {
+ pageSize = blockPageSnapshot
+ l.logger.Debug("using snapshot page size", "page_size", pageSize)
+ } else {
+ l.logger.Debug("using standard page size", "page_size", pageSize)
+ }
+
synced := make(chan error)
closeOnce := new(sync.Once)
paged := true
@@ -321,9 +324,9 @@ func (l *listener) Listen(ctx context.Context, from uint64, updater postage.Even
}
// do some paging (sub-optimal)
- if to-from >= blockPage {
+ if to-from >= pageSize {
paged = true
- to = from + blockPage - 1
+ to = from + pageSize - 1
} else {
closeOnce.Do(func() { synced <- nil })
}
diff --git a/pkg/postage/listener/listener_test.go b/pkg/postage/listener/listener_test.go
index e3ee957a3e8..bb34086bb6a 100644
--- a/pkg/postage/listener/listener_test.go
+++ b/pkg/postage/listener/listener_test.go
@@ -18,7 +18,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
chaincfg "github.com/ethersphere/bee/v2/pkg/config"
"github.com/ethersphere/bee/v2/pkg/log"
- "github.com/ethersphere/bee/v2/pkg/postage"
"github.com/ethersphere/bee/v2/pkg/postage/listener"
"github.com/ethersphere/bee/v2/pkg/util/abiutil"
"github.com/ethersphere/bee/v2/pkg/util/syncutil"
@@ -81,7 +80,7 @@ func TestListener(t *testing.T) {
backoffTime,
)
testutil.CleanupCloser(t, l)
- <-l.Listen(context.Background(), 0, ev, nil)
+ <-l.Listen(context.Background(), 0, ev)
select {
case e := <-ev.eventC:
@@ -122,7 +121,7 @@ func TestListener(t *testing.T) {
backoffTime,
)
testutil.CleanupCloser(t, l)
- <-l.Listen(context.Background(), 0, ev, nil)
+ <-l.Listen(context.Background(), 0, ev)
select {
case e := <-ev.eventC:
@@ -164,7 +163,7 @@ func TestListener(t *testing.T) {
)
testutil.CleanupCloser(t, l)
- <-l.Listen(context.Background(), 0, ev, nil)
+ <-l.Listen(context.Background(), 0, ev)
select {
case e := <-ev.eventC:
@@ -203,7 +202,7 @@ func TestListener(t *testing.T) {
backoffTime,
)
testutil.CleanupCloser(t, l)
- <-l.Listen(context.Background(), 0, ev, nil)
+ <-l.Listen(context.Background(), 0, ev)
select {
case e := <-ev.eventC:
@@ -266,7 +265,7 @@ func TestListener(t *testing.T) {
backoffTime,
)
testutil.CleanupCloser(t, l)
- <-l.Listen(context.Background(), 0, ev, nil)
+ <-l.Listen(context.Background(), 0, ev)
select {
case e := <-ev.eventC:
@@ -346,7 +345,7 @@ func TestListener(t *testing.T) {
0,
)
testutil.CleanupCloser(t, l)
- <-l.Listen(context.Background(), 0, ev, nil)
+ <-l.Listen(context.Background(), 0, ev)
select {
case e := <-ev.eventC:
@@ -374,7 +373,7 @@ func TestListener(t *testing.T) {
0,
)
testutil.CleanupCloser(t, l)
- <-l.Listen(context.Background(), 0, ev, nil)
+ <-l.Listen(context.Background(), 0, ev)
select {
case <-c.C:
@@ -401,7 +400,7 @@ func TestListener(t *testing.T) {
backoffTime,
)
testutil.CleanupCloser(t, l)
- <-l.Listen(context.Background(), 0, ev, nil)
+ <-l.Listen(context.Background(), 0, ev)
select {
case e := <-ev.eventC:
@@ -418,137 +417,21 @@ func TestListener(t *testing.T) {
})
}
-func TestListenerBatchState(t *testing.T) {
- t.Parallel()
-
- ev := newEventUpdaterMock()
- mf := newMockFilterer()
-
- create := createArgs{
- id: hash[:],
- owner: addr[:],
- amount: big.NewInt(42),
- normalisedAmount: big.NewInt(43),
- depth: 100,
- }
-
- topup := topupArgs{
- id: hash[:],
- amount: big.NewInt(0),
- normalisedBalance: big.NewInt(1),
- }
-
- depthIncrease := depthArgs{
- id: hash[:],
- depth: 200,
- normalisedBalance: big.NewInt(2),
- }
-
- priceUpdate := priceArgs{
- price: big.NewInt(500),
- }
-
- snapshot := &postage.ChainSnapshot{
- Events: []types.Log{
- create.toLog(496),
- topup.toLog(497),
- depthIncrease.toLog(498),
- priceUpdate.toLog(499),
- },
- FirstBlockNumber: 496,
- LastBlockNumber: 499,
- Timestamp: time.Now().Unix(),
- }
-
- stop := make(chan struct{})
- done := make(chan struct{})
- errs := make(chan error)
- noOfEvents := 0
-
- go func() {
- for {
- select {
- case <-stop:
- return
- case e := <-ev.eventC:
- noOfEvents++
- switch ev := e.(type) {
- case blockNumberCall:
- if ev.blockNumber < 497 && ev.blockNumber > 500 {
- errs <- fmt.Errorf("invalid blocknumber call %d", ev.blockNumber)
- return
- }
- if ev.blockNumber == 500 {
- close(done)
- return
- }
- case createArgs:
- if err := ev.compare(create); err != nil {
- errs <- err
- return
- }
- case topupArgs:
- if err := ev.compare(topup); err != nil {
- errs <- err
- return
- }
- case depthArgs:
- if err := ev.compare(depthIncrease); err != nil {
- errs <- err
- return
- }
- case priceArgs:
- if err := ev.compare(priceUpdate); err != nil {
- errs <- err
- return
- }
- }
- }
- }
- }()
-
- l := listener.New(
- nil,
- log.Noop,
- mf,
- postageStampContractAddress,
- postageStampContractABI,
- 1,
- stallingTimeout,
- backoffTime,
- )
- testutil.CleanupCloser(t, l)
- l.Listen(context.Background(), snapshot.LastBlockNumber+1, ev, snapshot)
-
- defer close(stop)
-
- select {
- case <-time.After(5 * time.Second):
- t.Fatal("timedout waiting for events to be processed", noOfEvents)
- case err := <-errs:
- t.Fatal(err)
- case <-done:
- if noOfEvents != 9 {
- t.Fatal("invalid count of events on completion", noOfEvents)
- }
- }
-}
-
func newEventUpdaterMock() *updater {
return &updater{
- eventC: make(chan interface{}, 1),
+ eventC: make(chan any, 1),
}
}
func newEventUpdaterMockWithBlockNumberUpdateError(err error) *updater {
return &updater{
- eventC: make(chan interface{}, 1),
+ eventC: make(chan any, 1),
blockNumberUpdateError: err,
}
}
type updater struct {
- eventC chan interface{}
+ eventC chan any
blockNumberUpdateError error
}
@@ -595,7 +478,7 @@ func (u *updater) UpdateBlockNumber(blockNumber uint64) error {
return u.blockNumberUpdateError
}
-func (u *updater) Start(ctx context.Context, bno uint64, cs *postage.ChainSnapshot) error {
+func (u *updater) Start(ctx context.Context, bno uint64) error {
return nil
}
diff --git a/pkg/postage/postagecontract/contract.go b/pkg/postage/postagecontract/contract.go
index d9645822166..e8570b64e9b 100644
--- a/pkg/postage/postagecontract/contract.go
+++ b/pkg/postage/postagecontract/contract.go
@@ -390,7 +390,7 @@ func (c *postageContract) CreateBatch(ctx context.Context, initialBalance *big.I
if err != nil {
return
}
- txHash = receipt.TxHash
+
for _, ev := range receipt.Logs {
if ev.Address == c.postageStampContractAddress && len(ev.Topics) > 0 && ev.Topics[0] == c.batchCreatedTopic {
var createdEvent batchCreatedEvent
@@ -415,6 +415,7 @@ func (c *postageContract) CreateBatch(ctx context.Context, initialBalance *big.I
if err != nil {
return
}
+ txHash = receipt.TxHash
return
}
}
@@ -447,7 +448,6 @@ func (c *postageContract) TopUpBatch(ctx context.Context, batchID []byte, topupB
receipt, err := c.sendTopUpBatchTransaction(ctx, batch.ID, topupBalance)
if err != nil {
- txHash = receipt.TxHash
return
}
@@ -483,9 +483,10 @@ func (c *postageContract) DiluteBatch(ctx context.Context, batchID []byte, newDe
if err != nil {
return
}
- txHash = receipt.TxHash
+
for _, ev := range receipt.Logs {
if ev.Address == c.postageStampContractAddress && len(ev.Topics) > 0 && ev.Topics[0] == c.batchDepthIncreaseTopic {
+ txHash = receipt.TxHash
return
}
}
diff --git a/pkg/postage/postagecontract/contract_test.go b/pkg/postage/postagecontract/contract_test.go
index 756bbe24a9d..80da7a8ab0d 100644
--- a/pkg/postage/postagecontract/contract_test.go
+++ b/pkg/postage/postagecontract/contract_test.go
@@ -76,9 +76,10 @@ func TestCreateBatch(t *testing.T) {
bzzTokenAddress,
transactionMock.New(
transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) {
- if *request.To == bzzTokenAddress {
+ switch *request.To {
+ case bzzTokenAddress:
return txHashApprove, nil
- } else if *request.To == postageStampAddress {
+ case postageStampAddress:
if bytes.Equal(expectedCallDataForExpireLimitedBatches[:32], request.Data[:32]) {
return txHashApprove, nil
}
@@ -90,11 +91,12 @@ func TestCreateBatch(t *testing.T) {
return common.Hash{}, errors.New("sent to wrong contract")
}),
transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) {
- if txHash == txHashApprove {
+ switch txHash {
+ case txHashApprove:
return &types.Receipt{
Status: 1,
}, nil
- } else if txHash == txHashCreate {
+ case txHashCreate:
return &types.Receipt{
Logs: []*types.Log{
newCreateEvent(postageStampAddress, batchID),
@@ -321,9 +323,10 @@ func TestTopUpBatch(t *testing.T) {
bzzTokenAddress,
transactionMock.New(
transactionMock.WithSendFunc(func(ctx context.Context, request *transaction.TxRequest, boost int) (txHash common.Hash, err error) {
- if *request.To == bzzTokenAddress {
+ switch *request.To {
+ case bzzTokenAddress:
return txHashApprove, nil
- } else if *request.To == postageStampAddress {
+ case postageStampAddress:
if !bytes.Equal(expectedCallData[:64], request.Data[:64]) {
return common.Hash{}, fmt.Errorf("got wrong call data. wanted %x, got %x", expectedCallData, request.Data)
}
@@ -332,11 +335,12 @@ func TestTopUpBatch(t *testing.T) {
return common.Hash{}, errors.New("sent to wrong contract")
}),
transactionMock.WithWaitForReceiptFunc(func(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) {
- if txHash == txHashApprove {
+ switch txHash {
+ case txHashApprove:
return &types.Receipt{
Status: 1,
}, nil
- } else if txHash == txHashTopup {
+ case txHashTopup:
return &types.Receipt{
Logs: []*types.Log{
newTopUpEvent(postageStampAddress, batch),
diff --git a/pkg/postage/service_test.go b/pkg/postage/service_test.go
index 1ae9dfd1b46..1237fecc066 100644
--- a/pkg/postage/service_test.go
+++ b/pkg/postage/service_test.go
@@ -36,7 +36,7 @@ func TestSaveLoad(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- for i := 0; i < 16; i++ {
+ for range 16 {
err := ps.Add(newTestStampIssuer(t, 1000))
if err != nil {
t.Fatal(err)
diff --git a/pkg/postage/stamper_test.go b/pkg/postage/stamper_test.go
index 383c534a0c4..f362ee8e23c 100644
--- a/pkg/postage/stamper_test.go
+++ b/pkg/postage/stamper_test.go
@@ -72,7 +72,7 @@ func TestStamperStamping(t *testing.T) {
chunkAddr, _ := createStamp(t, stamper)
// issue another 15
// collision depth is 8, committed batch depth is 12, bucket volume 2^4
- for i := 0; i < 14; i++ {
+ for i := range 14 {
randAddr := swarm.RandAddressAt(t, chunkAddr, 8)
_, err = stamper.Stamp(randAddr, randAddr)
if err != nil {
@@ -98,7 +98,7 @@ func TestStamperStamping(t *testing.T) {
chunkAddr, _ := createStamp(t, stamper)
// issue another 15
// collision depth is 8, committed batch depth is 12, bucket volume 2^4
- for i := 0; i < 15; i++ {
+ for i := range 15 {
randAddr := swarm.RandAddressAt(t, chunkAddr, 8)
_, err = stamper.Stamp(randAddr, randAddr)
if err != nil {
diff --git a/pkg/postage/stampissuer_test.go b/pkg/postage/stampissuer_test.go
index 5e7bfa71cbc..05c177360a7 100644
--- a/pkg/postage/stampissuer_test.go
+++ b/pkg/postage/stampissuer_test.go
@@ -176,7 +176,7 @@ func Test_StampIssuer_inc(t *testing.T) {
count := sti.BucketUpperBound()
// Increment to upper bound (fill bucket to max cap)
- for i := uint32(0); i < count; i++ {
+ for range count {
_, _, err := sti.Increment(addr)
if err != nil {
t.Fatal(err)
@@ -184,7 +184,7 @@ func Test_StampIssuer_inc(t *testing.T) {
}
// Incrementing stamp issuer above upper bound should return index starting from 0
- for i := uint32(0); i < count; i++ {
+ for i := range count {
idxb, _, err := sti.Increment(addr)
if err != nil {
t.Fatal(err)
@@ -203,7 +203,7 @@ func Test_StampIssuer_inc(t *testing.T) {
count := sti.BucketUpperBound()
// Increment to upper bound (fill bucket to max cap)
- for i := uint32(0); i < count; i++ {
+ for range count {
_, _, err := sti.Increment(addr)
if err != nil {
t.Fatal(err)
@@ -211,7 +211,7 @@ func Test_StampIssuer_inc(t *testing.T) {
}
// Incrementing stamp issuer above upper bound should return error
- for i := uint32(0); i < count; i++ {
+ for range count {
_, _, err := sti.Increment(addr)
if !errors.Is(err, postage.ErrBucketFull) {
t.Fatal("bucket should be full")
@@ -230,7 +230,7 @@ func TestUtilization(t *testing.T) {
var eg errgroup.Group
- for i := 0; i < 8; i++ {
+ for range 8 {
eg.Go(func() error {
for {
_, _, err := sti.Increment(swarm.RandAddress(t))
diff --git a/pkg/pss/mining_test.go b/pkg/pss/mining_test.go
index 4dd94e45bb2..94e469bf6ca 100644
--- a/pkg/pss/mining_test.go
+++ b/pkg/pss/mining_test.go
@@ -16,7 +16,7 @@ import (
func newTargets(length, depth int) pss.Targets {
targets := make([]pss.Target, length)
- for i := 0; i < length; i++ {
+ for i := range length {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, uint64(i))
targets[i] = pss.Target(buf[:depth])
@@ -51,7 +51,7 @@ func BenchmarkWrap(b *testing.B) {
name := fmt.Sprintf("length:%d,depth:%d", c.length, c.depth)
b.Run(name, func(b *testing.B) {
targets := newTargets(c.length, c.depth)
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
if _, err := pss.Wrap(ctx, topic, msg, pubkey, targets); err != nil {
b.Fatal(err)
}
diff --git a/pkg/pss/pss.go b/pkg/pss/pss.go
index 454f5960129..28319e9a615 100644
--- a/pkg/pss/pss.go
+++ b/pkg/pss/pss.go
@@ -130,7 +130,7 @@ func (p *pss) Register(topic Topic, handler Handler) (cleanup func()) {
defer p.handlersMu.Unlock()
h := p.handlers[topic]
- for i := 0; i < len(h); i++ {
+ for i := range h {
if h[i] == &handler {
p.handlers[topic] = append(h[:i], h[i+1:]...)
return
diff --git a/pkg/pss/pss_test.go b/pkg/pss/pss_test.go
index 685adb59fb9..2482f9b2c7b 100644
--- a/pkg/pss/pss_test.go
+++ b/pkg/pss/pss_test.go
@@ -219,7 +219,7 @@ func TestRegister(t *testing.T) {
func waitHandlerCallback(t *testing.T, msgChan *chan struct{}, count int) {
t.Helper()
- for received := 0; received < count; received++ {
+ for range count {
select {
case <-*msgChan:
case <-time.After(1 * time.Second):
diff --git a/pkg/pss/trojan.go b/pkg/pss/trojan.go
index b6b404f2389..8316ae6932e 100644
--- a/pkg/pss/trojan.go
+++ b/pkg/pss/trojan.go
@@ -215,7 +215,7 @@ func mine(ctx context.Context, odd bool, f func(nonce []byte) (swarm.Chunk, erro
defer cancel()
eg, ctx := errgroup.WithContext(ctx)
result := make(chan swarm.Chunk, 8)
- for i := 0; i < 8; i++ {
+ for range 8 {
eg.Go(func() error {
nonce := make([]byte, 32)
copy(nonce, initnonce)
diff --git a/pkg/puller/puller.go b/pkg/puller/puller.go
index 2fdbc24e9cf..0bcdcfae9cb 100644
--- a/pkg/puller/puller.go
+++ b/pkg/puller/puller.go
@@ -191,9 +191,7 @@ func (p *Puller) manage(ctx context.Context) {
// disconnectPeer cancels all existing syncing and removes the peer entry from the syncing map.
// Must be called under lock.
func (p *Puller) disconnectPeer(addr swarm.Address) {
- loggerV2 := p.logger.V(2).Register()
-
- loggerV2.Debug("disconnecting peer", "peer_address", addr)
+ p.logger.Debug("disconnecting peer", "peer_address", addr)
if peer, ok := p.syncPeers[addr.ByteString()]; ok {
peer.mtx.Lock()
peer.stop()
@@ -268,7 +266,7 @@ func (p *Puller) syncPeer(ctx context.Context, peer *syncPeer, storageRadius uin
if peer.po >= storageRadius {
// cancel all bins lower than the storage radius
- for bin := uint8(0); bin < storageRadius; bin++ {
+ for bin := range storageRadius {
peer.cancelBin(bin)
}
@@ -300,8 +298,6 @@ func (p *Puller) syncPeer(ctx context.Context, peer *syncPeer, storageRadius uin
// syncPeerBin will start historical and live syncing for the peer for a particular bin.
// Must be called under syncPeer lock.
func (p *Puller) syncPeerBin(parentCtx context.Context, peer *syncPeer, bin uint8, cursor uint64) {
- loggerV2 := p.logger.V(2).Register()
-
ctx, cancel := context.WithCancel(parentCtx)
peer.setBinCancel(cancel, bin)
@@ -331,14 +327,13 @@ func (p *Puller) syncPeerBin(parentCtx context.Context, peer *syncPeer, bin uint
select {
case <-ctx.Done():
- loggerV2.Debug("syncWorker context cancelled", "peer_address", address, "bin", bin)
+ p.logger.Debug("syncWorker context cancelled", "peer_address", address, "bin", bin)
return
default:
}
p.metrics.SyncWorkerIterCounter.Inc()
- syncStart := time.Now()
top, count, err := p.syncer.Sync(ctx, address, bin, start)
if top == math.MaxUint64 {
@@ -353,7 +348,7 @@ func (p *Puller) syncPeerBin(parentCtx context.Context, peer *syncPeer, bin uint
p.logger.Debug("syncWorker interval failed, quitting", "error", err, "peer_address", address, "bin", bin, "cursor", cursor, "start", start, "topmost", top)
return
}
- loggerV2.Debug("syncWorker interval failed", "error", err, "peer_address", address, "bin", bin, "cursor", cursor, "start", start, "topmost", top)
+ p.logger.Debug("syncWorker interval failed", "error", err, "peer_address", address, "bin", bin, "cursor", cursor, "start", start, "topmost", top)
}
_ = p.limiter.WaitN(ctx, count)
@@ -372,7 +367,6 @@ func (p *Puller) syncPeerBin(parentCtx context.Context, peer *syncPeer, bin uint
p.logger.Error(err, "syncWorker could not persist interval for peer, quitting", "peer_address", address)
return
}
- loggerV2.Debug("syncWorker pulled", "bin", bin, "start", start, "topmost", top, "isHistorical", isHistorical, "duration", time.Since(syncStart), "peer_address", address)
start = top + 1
}
}
diff --git a/pkg/pullsync/pullsync.go b/pkg/pullsync/pullsync.go
index 064099e765c..436600a2f76 100644
--- a/pkg/pullsync/pullsync.go
+++ b/pkg/pullsync/pullsync.go
@@ -49,7 +49,6 @@ const (
MaxCursor = math.MaxUint64
DefaultMaxPage uint64 = 250
pageTimeout = time.Second
- makeOfferTimeout = 15 * time.Minute
handleMaxChunksPerSecond = 250
handleRequestsLimitRate = time.Second / handleMaxChunksPerSecond // handle max `handleMaxChunksPerSecond` chunks per second per peer
)
@@ -228,7 +227,7 @@ func (s *Syncer) handler(streamCtx context.Context, p p2p.Peer, stream p2p.Strea
// Sync syncs a batch of chunks starting at a start BinID.
// It returns the BinID of highest chunk that was synced from the given
// batch and the total number of chunks the downstream peer has sent.
-func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start uint64) (uint64, int, error) {
+func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start uint64) (topmost uint64, count int, err error) {
stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
if err != nil {
@@ -237,7 +236,6 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start
defer func() {
if err != nil {
_ = stream.Reset()
- s.logger.Debug("error syncing peer", "peer_address", peer, "bin", bin, "start", start, "error", err)
} else {
stream.FullClose()
}
@@ -261,7 +259,7 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start
return offer.Topmost, 0, nil
}
- topmost := offer.Topmost
+ topmost = offer.Topmost
var (
bvLen = len(offer.Chunks)
@@ -403,9 +401,6 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start
// makeOffer tries to assemble an offer for a given requested interval.
func (s *Syncer) makeOffer(ctx context.Context, rn pb.Get) (*pb.Offer, error) {
- ctx, cancel := context.WithTimeout(ctx, makeOfferTimeout)
- defer cancel()
-
addrs, top, err := s.collectAddrs(ctx, uint8(rn.Bin), rn.Start)
if err != nil {
return nil, err
@@ -430,8 +425,6 @@ type collectAddrsResult struct {
// After the arrival of the first chunk, the subsequent chunks have a limited amount of time to arrive,
// after which the function returns the collected slice of chunks.
func (s *Syncer) collectAddrs(ctx context.Context, bin uint8, start uint64) ([]*storer.BinC, uint64, error) {
- loggerV2 := s.logger.V(2).Register()
-
v, _, err := s.intervalsSF.Do(ctx, sfKey(bin, start), func(ctx context.Context) (*collectAddrsResult, error) {
var (
chs []*storer.BinC
@@ -476,7 +469,7 @@ func (s *Syncer) collectAddrs(ctx context.Context, bin uint8, start uint64) ([]*
case <-ctx.Done():
return nil, ctx.Err()
case <-timerC:
- loggerV2.Debug("batch timeout timer triggered")
+ s.logger.Debug("batch timeout timer triggered")
// return batch if new chunks are not received after some time
break LOOP
}
@@ -518,17 +511,14 @@ func (s *Syncer) processWant(ctx context.Context, o *pb.Offer, w *pb.Want) ([]sw
}
func (s *Syncer) GetCursors(ctx context.Context, peer swarm.Address) (retr []uint64, epoch uint64, err error) {
- loggerV2 := s.logger.V(2).Register()
-
stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, cursorStreamName)
if err != nil {
return nil, 0, fmt.Errorf("new stream: %w", err)
}
- loggerV2.Debug("getting cursors from peer", "peer_address", peer)
+ s.logger.Debug("getting cursors from peer", "peer_address", peer)
defer func() {
if err != nil {
_ = stream.Reset()
- loggerV2.Debug("error getting cursors from peer", "peer_address", peer, "error", err)
} else {
stream.FullClose()
}
@@ -549,14 +539,11 @@ func (s *Syncer) GetCursors(ctx context.Context, peer swarm.Address) (retr []uin
}
func (s *Syncer) cursorHandler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
- loggerV2 := s.logger.V(2).Register()
-
w, r := protobuf.NewWriterAndReader(stream)
- loggerV2.Debug("peer wants cursors", "peer_address", p.Address)
+ s.logger.Debug("peer wants cursors", "peer_address", p.Address)
defer func() {
if err != nil {
_ = stream.Reset()
- loggerV2.Debug("error getting cursors for peer", "peer_address", p.Address, "error", err)
} else {
_ = stream.FullClose()
}
diff --git a/pkg/pullsync/pullsync_test.go b/pkg/pullsync/pullsync_test.go
index fc80bae137f..03d6927c1da 100644
--- a/pkg/pullsync/pullsync_test.go
+++ b/pkg/pullsync/pullsync_test.go
@@ -9,6 +9,7 @@ import (
"errors"
"io"
"testing"
+ "testing/synctest"
"time"
"github.com/ethersphere/bee/v2/pkg/log"
@@ -44,7 +45,7 @@ func init() {
chunks = make([]swarm.Chunk, n)
addrs = make([]swarm.Address, n)
results = make([]*storer.BinC, n)
- for i := 0; i < n; i++ {
+ for i := range n {
chunks[i] = testingc.GenerateTestRandomChunk()
addrs[i] = chunks[i].Address()
stampHash, _ := chunks[i].Stamp().Hash()
@@ -58,256 +59,256 @@ func init() {
}
func TestIncoming_WantNone(t *testing.T) {
- t.Parallel()
-
- var (
- topMost = uint64(4)
- ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...))
- recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
- psClient, clientDb = newPullSync(t, recorder, 0, mock.WithChunks(chunks...))
- )
-
- topmost, _, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
- if err != nil {
- t.Fatal(err)
- }
+ synctest.Test(t, func(t *testing.T) {
+ var (
+ topMost = uint64(4)
+ ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...))
+ recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
+ psClient, clientDb = newPullSync(t, recorder, 0, mock.WithChunks(chunks...))
+ )
+
+ topmost, _, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
- if topmost != topMost {
- t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
- }
- if clientDb.PutCalls() > 0 {
- t.Fatal("too many puts")
- }
+ if topmost != topMost {
+ t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
+ }
+ if clientDb.PutCalls() > 0 {
+ t.Fatal("too many puts")
+ }
+ })
}
func TestIncoming_ContextTimeout(t *testing.T) {
- t.Parallel()
-
- var (
- ps, _ = newPullSync(t, nil, 0, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...))
- recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
- psClient, _ = newPullSync(t, recorder, 0, mock.WithChunks(chunks...))
- )
-
- ctx, cancel := context.WithTimeout(context.Background(), 0)
- cancel()
- _, _, err := psClient.Sync(ctx, swarm.ZeroAddress, 0, 0)
- if !errors.Is(err, context.DeadlineExceeded) {
- t.Fatalf("wanted error %v, got %v", context.DeadlineExceeded, err)
- }
+ synctest.Test(t, func(t *testing.T) {
+ var (
+ ps, _ = newPullSync(t, nil, 0, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...))
+ recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
+ psClient, _ = newPullSync(t, recorder, 0, mock.WithChunks(chunks...))
+ )
+
+ ctx, cancel := context.WithTimeout(context.Background(), 0)
+ cancel()
+ _, _, err := psClient.Sync(ctx, swarm.ZeroAddress, 0, 0)
+ if !errors.Is(err, context.DeadlineExceeded) {
+ t.Fatalf("wanted error %v, got %v", context.DeadlineExceeded, err)
+ }
+ })
}
func TestIncoming_WantOne(t *testing.T) {
- t.Parallel()
-
- var (
- topMost = uint64(4)
- ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...))
- recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
- psClient, clientDb = newPullSync(t, recorder, 0, mock.WithChunks(someChunks(1, 2, 3, 4)...))
- )
-
- topmost, _, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
- if err != nil {
- t.Fatal(err)
- }
+ synctest.Test(t, func(t *testing.T) {
+ var (
+ topMost = uint64(4)
+ ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...))
+ recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
+ psClient, clientDb = newPullSync(t, recorder, 0, mock.WithChunks(someChunks(1, 2, 3, 4)...))
+ )
+
+ topmost, _, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
- if topmost != topMost {
- t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
- }
+ if topmost != topMost {
+ t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
+ }
- // should have all
- haveChunks(t, clientDb, chunks...)
- if clientDb.PutCalls() != 1 {
- t.Fatalf("want 1 puts but got %d", clientDb.PutCalls())
- }
+ // should have all
+ haveChunks(t, clientDb, chunks...)
+ if clientDb.PutCalls() != 1 {
+ t.Fatalf("want 1 puts but got %d", clientDb.PutCalls())
+ }
+ })
}
func TestIncoming_WantAll(t *testing.T) {
- t.Parallel()
-
- var (
- topMost = uint64(4)
- ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...))
- recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
- psClient, clientDb = newPullSync(t, recorder, 0)
- )
-
- topmost, _, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
- if err != nil {
- t.Fatal(err)
- }
+ synctest.Test(t, func(t *testing.T) {
+ var (
+ topMost = uint64(4)
+ ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...))
+ recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
+ psClient, clientDb = newPullSync(t, recorder, 0)
+ )
+
+ topmost, _, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
- if topmost != topMost {
- t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
- }
+ if topmost != topMost {
+ t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
+ }
- // should have all
- haveChunks(t, clientDb, chunks...)
- if p := clientDb.PutCalls(); p != len(chunks) {
- t.Fatalf("want %d puts but got %d", len(chunks), p)
- }
+ // should have all
+ haveChunks(t, clientDb, chunks...)
+ if p := clientDb.PutCalls(); p != len(chunks) {
+ t.Fatalf("want %d puts but got %d", len(chunks), p)
+ }
+ })
}
func TestIncoming_WantErrors(t *testing.T) {
- t.Parallel()
-
- tChunks := testingc.GenerateTestRandomChunks(4)
- // add same chunk with a different batch id
- ch := swarm.NewChunk(tChunks[3].Address(), tChunks[3].Data()).WithStamp(postagetesting.MustNewStamp())
- tChunks = append(tChunks, ch)
- // add invalid chunk
- tChunks = append(tChunks, testingc.GenerateTestRandomInvalidChunk())
-
- tResults := make([]*storer.BinC, len(tChunks))
- for i, c := range tChunks {
- stampHash, err := c.Stamp().Hash()
- if err != nil {
- t.Fatal(err)
+ synctest.Test(t, func(t *testing.T) {
+ tChunks := testingc.GenerateTestRandomChunks(4)
+ // add same chunk with a different batch id
+ ch := swarm.NewChunk(tChunks[3].Address(), tChunks[3].Data()).WithStamp(postagetesting.MustNewStamp())
+ tChunks = append(tChunks, ch)
+ // add invalid chunk
+ tChunks = append(tChunks, testingc.GenerateTestRandomInvalidChunk())
+
+ tResults := make([]*storer.BinC, len(tChunks))
+ for i, c := range tChunks {
+ stampHash, err := c.Stamp().Hash()
+ if err != nil {
+ t.Fatal(err)
+ }
+ tResults[i] = &storer.BinC{
+ Address: c.Address(),
+ BatchID: c.Stamp().BatchID(),
+ BinID: uint64(i + 5), // start from a higher bin id
+ StampHash: stampHash,
+ }
}
- tResults[i] = &storer.BinC{
- Address: c.Address(),
- BatchID: c.Stamp().BatchID(),
- BinID: uint64(i + 5), // start from a higher bin id
- StampHash: stampHash,
- }
- }
- putHook := func(c swarm.Chunk) error {
- if c.Address().Equal(tChunks[1].Address()) {
- return storage.ErrOverwriteNewerChunk
+ putHook := func(c swarm.Chunk) error {
+ if c.Address().Equal(tChunks[1].Address()) {
+ return storage.ErrOverwriteNewerChunk
+ }
+ return nil
}
- return nil
- }
- validStampErr := errors.New("valid stamp error")
- validStamp := func(c swarm.Chunk) (swarm.Chunk, error) {
- if c.Address().Equal(tChunks[2].Address()) {
- return nil, validStampErr
+ validStampErr := errors.New("valid stamp error")
+ validStamp := func(c swarm.Chunk) (swarm.Chunk, error) {
+ if c.Address().Equal(tChunks[2].Address()) {
+ return nil, validStampErr
+ }
+ return c, nil
}
- return c, nil
- }
-
- var (
- topMost = uint64(10)
- ps, _ = newPullSync(t, nil, 20, mock.WithSubscribeResp(tResults, nil), mock.WithChunks(tChunks...))
- recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
- psClient, clientDb = newPullSyncWithStamperValidator(t, recorder, 0, validStamp, mock.WithPutHook(putHook))
- )
- topmost, count, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
- for _, e := range []error{storage.ErrOverwriteNewerChunk, validStampErr, swarm.ErrInvalidChunk} {
- if !errors.Is(err, e) {
- t.Fatalf("expected error %v", err)
+ var (
+ topMost = uint64(10)
+ ps, _ = newPullSync(t, nil, 20, mock.WithSubscribeResp(tResults, nil), mock.WithChunks(tChunks...))
+ recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
+ psClient, clientDb = newPullSyncWithStamperValidator(t, recorder, 0, validStamp, mock.WithPutHook(putHook))
+ )
+
+ topmost, count, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
+ for _, e := range []error{storage.ErrOverwriteNewerChunk, validStampErr, swarm.ErrInvalidChunk} {
+ if !errors.Is(err, e) {
+ t.Fatalf("expected error %v", err)
+ }
}
- }
- if count != 3 {
- t.Fatalf("got %d chunks but want %d", count, 3)
- }
+ if count != 3 {
+ t.Fatalf("got %d chunks but want %d", count, 3)
+ }
- if topmost != topMost {
- t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
- }
+ if topmost != topMost {
+ t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
+ }
- haveChunks(t, clientDb, append(tChunks[:1], tChunks[3:5]...)...)
- if p := clientDb.PutCalls(); p != len(chunks)-1 {
- t.Fatalf("want %d puts but got %d", len(chunks), p)
- }
+ haveChunks(t, clientDb, append(tChunks[:1], tChunks[3:5]...)...)
+ if p := clientDb.PutCalls(); p != len(chunks)-1 {
+ t.Fatalf("want %d puts but got %d", len(chunks), p)
+ }
+ })
}
func TestIncoming_UnsolicitedChunk(t *testing.T) {
- t.Parallel()
-
- evilAddr := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000666")
- evilData := []byte{0x66, 0x66, 0x66}
- stamp := postagetesting.MustNewStamp()
- evil := swarm.NewChunk(evilAddr, evilData).WithStamp(stamp)
-
- var (
- ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...), mock.WithEvilChunk(addrs[4], evil))
- recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
- psClient, _ = newPullSync(t, recorder, 0)
- )
-
- _, _, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
- if !errors.Is(err, pullsync.ErrUnsolicitedChunk) {
- t.Fatalf("expected err %v but got %v", pullsync.ErrUnsolicitedChunk, err)
- }
+ synctest.Test(t, func(t *testing.T) {
+ evilAddr := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000666")
+ evilData := []byte{0x66, 0x66, 0x66}
+ stamp := postagetesting.MustNewStamp()
+ evil := swarm.NewChunk(evilAddr, evilData).WithStamp(stamp)
+
+ var (
+ ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks(chunks...), mock.WithEvilChunk(addrs[4], evil))
+ recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
+ psClient, _ = newPullSync(t, recorder, 0)
+ )
+
+ _, _, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
+ if !errors.Is(err, pullsync.ErrUnsolicitedChunk) {
+ t.Fatalf("expected err %v but got %v", pullsync.ErrUnsolicitedChunk, err)
+ }
+ })
}
func TestMissingChunk(t *testing.T) {
- t.Parallel()
-
- var (
- zeroChunk = swarm.NewChunk(swarm.ZeroAddress, nil)
- topMost = uint64(4)
- ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks([]swarm.Chunk{zeroChunk}...))
- recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
- psClient, _ = newPullSync(t, recorder, 0)
- )
-
- topmost, count, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
- if err != nil {
- t.Fatal(err)
- }
+ synctest.Test(t, func(t *testing.T) {
+ var (
+ zeroChunk = swarm.NewChunk(swarm.ZeroAddress, nil)
+ topMost = uint64(4)
+ ps, _ = newPullSync(t, nil, 5, mock.WithSubscribeResp(results, nil), mock.WithChunks([]swarm.Chunk{zeroChunk}...))
+ recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
+ psClient, _ = newPullSync(t, recorder, 0)
+ )
+
+ topmost, count, err := psClient.Sync(context.Background(), swarm.ZeroAddress, 0, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
- if topmost != topMost {
- t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
- }
- if count != 0 {
- t.Fatalf("got count %d but want %d", count, 0)
- }
+ if topmost != topMost {
+ t.Fatalf("got offer topmost %d but want %d", topmost, topMost)
+ }
+ if count != 0 {
+ t.Fatalf("got count %d but want %d", count, 0)
+ }
+ })
}
func TestGetCursors(t *testing.T) {
- t.Parallel()
-
- var (
- epochTs = uint64(time.Now().Unix())
- mockCursors = []uint64{100, 101, 102, 103}
- ps, _ = newPullSync(t, nil, 0, mock.WithCursors(mockCursors, epochTs))
- recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
- psClient, _ = newPullSync(t, recorder, 0)
- )
-
- curs, epoch, err := psClient.GetCursors(context.Background(), swarm.ZeroAddress)
- if err != nil {
- t.Fatal(err)
- }
+ synctest.Test(t, func(t *testing.T) {
+ var (
+ epochTs = uint64(time.Now().Unix())
+ mockCursors = []uint64{100, 101, 102, 103}
+ ps, _ = newPullSync(t, nil, 0, mock.WithCursors(mockCursors, epochTs))
+ recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
+ psClient, _ = newPullSync(t, recorder, 0)
+ )
+
+ curs, epoch, err := psClient.GetCursors(context.Background(), swarm.ZeroAddress)
+ if err != nil {
+ t.Fatal(err)
+ }
- if len(curs) != len(mockCursors) {
- t.Fatalf("length mismatch got %d want %d", len(curs), len(mockCursors))
- }
+ if len(curs) != len(mockCursors) {
+ t.Fatalf("length mismatch got %d want %d", len(curs), len(mockCursors))
+ }
- if epochTs != epoch {
- t.Fatalf("epochs do not match got %d want %d", epoch, epochTs)
- }
+ if epochTs != epoch {
+ t.Fatalf("epochs do not match got %d want %d", epoch, epochTs)
+ }
- for i, v := range mockCursors {
- if curs[i] != v {
- t.Errorf("cursor mismatch. index %d want %d got %d", i, v, curs[i])
+ for i, v := range mockCursors {
+ if curs[i] != v {
+ t.Errorf("cursor mismatch. index %d want %d got %d", i, v, curs[i])
+ }
}
- }
+ })
}
func TestGetCursorsError(t *testing.T) {
- t.Parallel()
-
- var (
- e = errors.New("erring")
- ps, _ = newPullSync(t, nil, 0, mock.WithCursorsErr(e))
- recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
- psClient, _ = newPullSync(t, recorder, 0)
- )
-
- _, _, err := psClient.GetCursors(context.Background(), swarm.ZeroAddress)
- if err == nil {
- t.Fatal("expected error but got none")
- }
- if !errors.Is(err, io.EOF) {
- t.Fatalf("expect error '%v' but got '%v'", e, err)
- }
+ synctest.Test(t, func(t *testing.T) {
+ var (
+ e = errors.New("erring")
+ ps, _ = newPullSync(t, nil, 0, mock.WithCursorsErr(e))
+ recorder = streamtest.New(streamtest.WithProtocols(ps.Protocol()))
+ psClient, _ = newPullSync(t, recorder, 0)
+ )
+
+ _, _, err := psClient.GetCursors(context.Background(), swarm.ZeroAddress)
+ if err == nil {
+ t.Fatal("expected error but got none")
+ }
+ if !errors.Is(err, io.EOF) {
+ t.Fatalf("expect error '%v' but got '%v'", e, err)
+ }
+ })
}
func haveChunks(t *testing.T, s *mock.ReserveStore, chunks ...swarm.Chunk) {
diff --git a/pkg/pusher/pusher.go b/pkg/pusher/pusher.go
index 46632c18765..9c4073f1fcc 100644
--- a/pkg/pusher/pusher.go
+++ b/pkg/pusher/pusher.go
@@ -63,8 +63,7 @@ type Service struct {
}
const (
- traceDuration = 30 * time.Second // duration for every root tracing span
- ConcurrentPushes = swarm.Branches // how many chunks to push simultaneously
+ ConcurrentPushes = swarm.Branches // how many chunks to push simultaneously
DefaultRetryCount = 6
)
diff --git a/pkg/pusher/pusher_test.go b/pkg/pusher/pusher_test.go
index d45101d06f5..4abe0d67537 100644
--- a/pkg/pusher/pusher_test.go
+++ b/pkg/pusher/pusher_test.go
@@ -109,7 +109,7 @@ func (m *mockStorer) ReservePutter() storage.Putter {
)
}
-// TestSendChunkToPushSync sends a chunk to pushsync to be sent to its closest peer and get a receipt.
+// TestChunkSyncing sends a chunk to pushsync to be sent to its closest peer and get a receipt.
// once the receipt is got this check to see if the localstore is updated to see if the chunk is set
// as ModeSetSync status.
func TestChunkSyncing(t *testing.T) {
diff --git a/pkg/pushsync/main_test.go b/pkg/pushsync/main_test.go
index 1364235fc42..cfd84a04c51 100644
--- a/pkg/pushsync/main_test.go
+++ b/pkg/pushsync/main_test.go
@@ -11,5 +11,5 @@ import (
)
func TestMain(m *testing.M) {
- goleak.VerifyTestMain(m)
+ goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"))
}
diff --git a/pkg/pushsync/metrics.go b/pkg/pushsync/metrics.go
index 7f4f5884bf0..758a6f072a5 100644
--- a/pkg/pushsync/metrics.go
+++ b/pkg/pushsync/metrics.go
@@ -28,6 +28,7 @@ type metrics struct {
ReceiptDepth *prometheus.CounterVec
ShallowReceiptDepth *prometheus.CounterVec
ShallowReceipt prometheus.Counter
+ OverdraftRefresh prometheus.Counter
}
func newMetrics() metrics {
@@ -146,6 +147,12 @@ func newMetrics() metrics {
},
[]string{"depth"},
),
+ OverdraftRefresh: prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: m.Namespace,
+ Subsystem: subsystem,
+ Name: "overdraft_refresh",
+ Help: "Total number of times peers were skipped due to overdraft, requiring a wait to refresh balance.",
+ }),
}
}
diff --git a/pkg/pushsync/pushsync.go b/pkg/pushsync/pushsync.go
index 9a9754a9ab3..05c4aef3d41 100644
--- a/pkg/pushsync/pushsync.go
+++ b/pkg/pushsync/pushsync.go
@@ -32,6 +32,7 @@ import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
olog "github.com/opentracing/opentracing-go/log"
+ "golang.org/x/time/rate"
)
// loggerName is the tree path name of the logger for this package.
@@ -99,6 +100,7 @@ type PushSync struct {
stabilizer stabilization.Subscriber
shallowReceiptTolerance uint8
+ overDraftRefreshLimiter *rate.Limiter
}
type receiptResult struct {
@@ -148,6 +150,7 @@ func New(
errSkip: skippeers.NewList(time.Minute),
stabilizer: stabilizer,
shallowReceiptTolerance: shallowReceiptTolerance,
+ overDraftRefreshLimiter: rate.NewLimiter(rate.Every(time.Second), 1),
}
ps.validStamp = ps.validStampWrapper(validStamp)
@@ -183,7 +186,10 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
ps.metrics.TotalHandlerTime.WithLabelValues("failure").Observe(time.Since(now).Seconds())
ps.metrics.TotalHandlerErrors.Inc()
if !attemptedWrite {
- _ = w.WriteMsgWithContext(ctx, &pb.Receipt{Err: err.Error()})
+ if writeErr := w.WriteMsgWithContext(ctx, &pb.Receipt{Err: err.Error()}); writeErr == nil {
+ _ = stream.FullClose()
+ return
+ }
}
_ = stream.Reset()
} else {
@@ -424,7 +430,10 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo
continue // there is still an inflight request, wait for it's result
}
- ps.logger.Debug("sleeping to refresh overdraft balance", "chunk_address", ch.Address())
+ ps.metrics.OverdraftRefresh.Inc()
+ if ps.overDraftRefreshLimiter.Allow() {
+ ps.logger.Debug("sleeping to refresh overdraft balance")
+ }
select {
case <-time.After(overDraftRefresh):
diff --git a/pkg/pushsync/pushsync_integration_test.go b/pkg/pushsync/pushsync_integration_test.go
new file mode 100644
index 00000000000..e9b2fa30f11
--- /dev/null
+++ b/pkg/pushsync/pushsync_integration_test.go
@@ -0,0 +1,201 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pushsync_test
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "encoding/json"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ accountingmock "github.com/ethersphere/bee/v2/pkg/accounting/mock"
+ "github.com/ethersphere/bee/v2/pkg/crypto"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/libp2ptest"
+ postagetesting "github.com/ethersphere/bee/v2/pkg/postage/testing"
+ pricermock "github.com/ethersphere/bee/v2/pkg/pricer/mock"
+ "github.com/ethersphere/bee/v2/pkg/pushsync"
+ "github.com/ethersphere/bee/v2/pkg/soc"
+ stabilizationmock "github.com/ethersphere/bee/v2/pkg/stabilization/mock"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storage/inmemchunkstore"
+ testingc "github.com/ethersphere/bee/v2/pkg/storage/testing"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ topologymock "github.com/ethersphere/bee/v2/pkg/topology/mock"
+)
+
+// TestPushSyncIntegration is a regression test for the fix of missing
+// FullClose on pushsync streams.
+func TestPushSyncIntegration(t *testing.T) {
+ // Capture logs to check for errors
+ var logMessages libp2ptest.SafeBuffer
+
+ // Enable debug logging
+ logger := log.NewLogger("test",
+ log.WithVerbosity(log.VerbosityDebug),
+ log.WithJSONOutput(),
+ log.WithSink(&logMessages),
+ )
+
+ // Create two libp2p services
+ server, serverAddr := libp2ptest.NewLibp2pService(t, 1, logger.WithValues("libp2p", "server").Build())
+ client, clientAddr := libp2ptest.NewLibp2pService(t, 1, logger.WithValues("libp2p", "client").Build())
+
+ // Server setup
+ serverStorer := &testIntegrationStorer{ChunkStore: inmemchunkstore.New()}
+ serverSigner := crypto.NewDefaultSigner(mustGenerateKey(t))
+
+ serverPushSync := pushsync.New(
+ serverAddr,
+ 1,
+ common.HexToHash("0x1").Bytes(),
+ server, // Use libp2p service as streamer
+ serverStorer,
+ func() (uint8, error) { return 0, nil },
+ topologymock.NewTopologyDriver(), // No peers needed for server in this test
+ true,
+ func(swarm.Chunk) {}, // unwrap
+ func(*soc.SOC) {}, // gsocHandler
+ func(c swarm.Chunk) (swarm.Chunk, error) {
+ return c.WithStamp(postagetesting.MustNewValidStamp(serverSigner, c.Address())), nil
+ },
+ logger.WithValues("pushsync", "server").Build(),
+ accountingmock.NewAccounting(),
+ pricermock.NewMockService(10, 10),
+ serverSigner,
+ nil,
+ stabilizationmock.NewSubscriber(true),
+ 0,
+ )
+ t.Cleanup(func() { serverPushSync.Close() })
+
+ // Register pushsync protocol on server
+ if err := server.AddProtocol(serverPushSync.Protocol()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Client setup
+ clientStorer := &testIntegrationStorer{ChunkStore: inmemchunkstore.New()}
+ clientSigner := crypto.NewDefaultSigner(mustGenerateKey(t))
+
+ // Client needs to know server is the closest peer
+ clientTopology := topologymock.NewTopologyDriver(topologymock.WithPeers(serverAddr))
+
+ clientPushSync := pushsync.New(
+ clientAddr,
+ 1,
+ common.HexToHash("0x1").Bytes(),
+ client, // Use libp2p service as streamer
+ clientStorer,
+ func() (uint8, error) { return 0, nil },
+ clientTopology,
+ true,
+ func(swarm.Chunk) {}, // unwrap
+ func(*soc.SOC) {}, // gsocHandler
+ func(c swarm.Chunk) (swarm.Chunk, error) {
+ return c.WithStamp(postagetesting.MustNewValidStamp(clientSigner, c.Address())), nil
+ },
+ logger.WithValues("pushsync", "client").Build(),
+ accountingmock.NewAccounting(),
+ pricermock.NewMockService(10, 10),
+ clientSigner,
+ nil,
+ stabilizationmock.NewSubscriber(true),
+ 0,
+ )
+ t.Cleanup(func() { clientPushSync.Close() })
+
+ // Register pushsync protocol on client
+ if err := client.AddProtocol(clientPushSync.Protocol()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Connect client to server
+ serverAddrs, err := server.Addresses()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Connect(context.Background(), serverAddrs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Push chunk
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ // Generate a chunk that is closer to the server than the client
+ var chunk swarm.Chunk
+ for {
+ chunk = testingc.GenerateTestRandomChunk()
+ // Check if chunk is closer to server
+ if swarm.Proximity(serverAddr.Bytes(), chunk.Address().Bytes()) > swarm.Proximity(clientAddr.Bytes(), chunk.Address().Bytes()) {
+ break
+ }
+ }
+
+ _, err = clientPushSync.PushChunkToClosest(ctx, chunk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify chunk is stored on server
+ has, err := serverStorer.Has(ctx, chunk.Address())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !has {
+ t.Fatal("chunk not found on server")
+ }
+
+ // Validate that streams are properly closed by the pushsync service
+ // by checking for "stream reset" log entries
+ streamResetLogLine := ""
+ for line := range strings.SplitSeq(logMessages.String(), "\n") {
+ if line == "" {
+ continue
+ }
+ var entry map[string]any
+ if err := json.Unmarshal([]byte(line), &entry); err != nil {
+ t.Fatalf("failed to unmarshal log entry: %v", err)
+ }
+ errorString, ok := entry["error"].(string)
+ if !ok {
+ continue
+ }
+ if strings.Contains(errorString, "stream reset") {
+ streamResetLogLine = line
+ break
+ }
+ }
+ if streamResetLogLine != "" {
+ t.Errorf("found stream reset log line: %s", streamResetLogLine)
+ }
+}
+
+type testIntegrationStorer struct {
+ storage.ChunkStore
+}
+
+func (t *testIntegrationStorer) Report(context.Context, swarm.Chunk, storage.ChunkState) error {
+ return nil
+}
+
+func (t *testIntegrationStorer) ReservePutter() storage.Putter {
+ return t.ChunkStore
+}
+
+func mustGenerateKey(t *testing.T) *ecdsa.PrivateKey {
+ t.Helper()
+ k, err := crypto.GenerateSecp256k1Key()
+ if err != nil {
+ t.Fatal(err)
+ }
+ return k
+}
diff --git a/pkg/pushsync/pushsync_test.go b/pkg/pushsync/pushsync_test.go
index 7e1c661f089..90e6d654a5b 100644
--- a/pkg/pushsync/pushsync_test.go
+++ b/pkg/pushsync/pushsync_test.go
@@ -308,7 +308,7 @@ func TestShallowReceiptTolerance(t *testing.T) {
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil)
}
-// PushChunkToClosest tests the sending of chunk to closest peer from the origination source perspective.
+// TestPushChunkToClosest tests the sending of chunk to closest peer from the origination source perspective.
// it also checks whether the tags are incremented properly if they are present
func TestPushChunkToClosest(t *testing.T) {
t.Parallel()
diff --git a/pkg/replicas/getter.go b/pkg/replicas/getter.go
index 5dc42cb7c82..b08b5c780e8 100644
--- a/pkg/replicas/getter.go
+++ b/pkg/replicas/getter.go
@@ -60,9 +60,7 @@ func (g *getter) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, e
errcnt := 0
// concurrently call to retrieve chunk using original CAC address
- g.wg.Add(1)
- go func() {
- defer g.wg.Done()
+ g.wg.Go(func() {
ch, err := g.Getter.Get(ctx, addr)
if err != nil {
errc <- err
@@ -73,7 +71,7 @@ func (g *getter) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, e
case resultC <- ch:
case <-ctx.Done():
}
- }()
+ })
// counters
n := 0 // counts the replica addresses tried
target := 2 // the number of replicas attempted to download in this batch
@@ -117,9 +115,7 @@ func (g *getter) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, e
continue
}
- g.wg.Add(1)
- go func() {
- defer g.wg.Done()
+ g.wg.Go(func() {
ch, err := g.Getter.Get(ctx, swarm.NewAddress(so.addr))
if err != nil {
errc <- err
@@ -136,7 +132,7 @@ func (g *getter) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, e
case resultC <- soc.WrappedChunk():
case <-ctx.Done():
}
- }()
+ })
n++
if n < target {
continue
diff --git a/pkg/replicas/putter.go b/pkg/replicas/putter.go
index 8e23059e1cf..7614dee56d0 100644
--- a/pkg/replicas/putter.go
+++ b/pkg/replicas/putter.go
@@ -43,15 +43,13 @@ func (p *putter) Put(ctx context.Context, ch swarm.Chunk) (err error) {
errc := make(chan error, p.rLevel.GetReplicaCount())
wg := sync.WaitGroup{}
for r := range rr.c {
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
sch, err := soc.New(r.id, ch).Sign(signer)
if err == nil {
err = p.putter.Put(ctx, sch)
}
errc <- err
- }()
+ })
}
wg.Wait()
diff --git a/pkg/resolver/client/ens/ens.go b/pkg/resolver/client/ens/ens.go
index 38c7432e979..b921ad8c9b7 100644
--- a/pkg/resolver/client/ens/ens.go
+++ b/pkg/resolver/client/ens/ens.go
@@ -167,10 +167,10 @@ func wrapDial(endpoint, contractAddr string) (*ethclient.Client, *goens.Registry
}
func wrapResolve(registry *goens.Registry, _ common.Address, name string) (string, error) {
- // Ensure the name is registered.
ownerAddress, err := registry.Owner(name)
+ // It returns an error only if the service is not available.
if err != nil {
- return "", fmt.Errorf("owner: %w: %w", err, resolver.ErrNotFound)
+ return "", fmt.Errorf("%w: %w", resolver.ErrServiceNotAvailable, err)
}
// If the name is not registered, return an error.
@@ -181,12 +181,16 @@ func wrapResolve(registry *goens.Registry, _ common.Address, name string) (strin
// Obtain the resolver for this domain name.
ensR, err := registry.Resolver(name)
if err != nil {
- return "", fmt.Errorf("resolver: %w: %w", err, resolver.ErrServiceNotAvailable)
+ return "", fmt.Errorf("%w: %w", resolver.ErrServiceNotAvailable, err)
}
// Try and read out the content hash record.
ch, err := ensR.Contenthash()
if err != nil {
+ // Check if it's a service error (rate limiting, network issues)
+ if strings.Contains(err.Error(), "429") || strings.Contains(err.Error(), "rate limit") {
+ return "", fmt.Errorf("%w: %w", resolver.ErrServiceNotAvailable, err)
+ }
return "", fmt.Errorf("contenthash: %w: %w", err, resolver.ErrInvalidContentHash)
}
diff --git a/pkg/resolver/multiresolver/multiresolver.go b/pkg/resolver/multiresolver/multiresolver.go
index e91f5155f28..87cfda56d16 100644
--- a/pkg/resolver/multiresolver/multiresolver.go
+++ b/pkg/resolver/multiresolver/multiresolver.go
@@ -14,6 +14,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/resolver"
"github.com/ethersphere/bee/v2/pkg/resolver/cidv1"
"github.com/ethersphere/bee/v2/pkg/resolver/client/ens"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/hashicorp/go-multierror"
)
@@ -35,6 +36,8 @@ var (
ErrResolverChainFailed = errors.New("resolver chain failed")
// ErrCloseFailed denotes that closing the multiresolver failed.
ErrCloseFailed = errors.New("close failed")
+ // ErrResolverService denotes that no resolver service is configured for the requested name or the resolver service is not available.
+ ErrResolverService = errors.New("cannot communicate with the resolver or no resolver service configured")
)
type resolverMap map[string][]resolver.Interface
@@ -160,6 +163,9 @@ func (mr *MultiResolver) Resolve(name string) (addr resolver.Address, err error)
// If no resolver chain is found, switch to the default chain.
if len(chain) == 0 {
chain = mr.resolvers[""]
+ if len(chain) == 1 && tld != "" { // only the CID resolver is defined
+ return swarm.ZeroAddress, resolver.ErrServiceNotAvailable
+ }
}
var errs *multierror.Error
diff --git a/pkg/resolver/multiresolver/multiresolver_test.go b/pkg/resolver/multiresolver/multiresolver_test.go
index c1f9ba0311a..8c242004696 100644
--- a/pkg/resolver/multiresolver/multiresolver_test.go
+++ b/pkg/resolver/multiresolver/multiresolver_test.go
@@ -217,7 +217,7 @@ func TestResolve(t *testing.T) {
},
{
// Switch to the default chain:
- name: "this.empty",
+ name: "defaultChainIsCidTriggerItWithoutTld",
wantAdr: addr,
},
{
diff --git a/pkg/resolver/resolver.go b/pkg/resolver/resolver.go
index 043dcaf87e0..e289d454671 100644
--- a/pkg/resolver/resolver.go
+++ b/pkg/resolver/resolver.go
@@ -20,7 +20,7 @@ var (
// ErrNotFound denotes that given name was not found
ErrNotFound = errors.New("not found")
// ErrServiceNotAvailable denotes that remote ENS service is not available
- ErrServiceNotAvailable = errors.New("not available")
+ ErrServiceNotAvailable = errors.New("ENS service is not available")
// ErrInvalidContentHash denotes that the value of the response contenthash record is not valid.
ErrInvalidContentHash = errors.New("invalid swarm content hash")
)
diff --git a/pkg/retrieval/main_test.go b/pkg/retrieval/main_test.go
index 8be3983b65d..532dcc95bcb 100644
--- a/pkg/retrieval/main_test.go
+++ b/pkg/retrieval/main_test.go
@@ -16,5 +16,6 @@ func TestMain(m *testing.M) {
// pkg/p2p package has some leak issues, we ignore them here as they are not in current scope
goleak.IgnoreTopFunction("github.com/ethersphere/bee/v2/pkg/p2p/protobuf.Reader.ReadMsgWithContext"),
goleak.IgnoreTopFunction("github.com/ethersphere/bee/v2/pkg/p2p/streamtest.(*record).Read"),
+ goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
)
}
diff --git a/pkg/retrieval/retrieval.go b/pkg/retrieval/retrieval.go
index 2fce97be5a2..f1e5f5193b7 100644
--- a/pkg/retrieval/retrieval.go
+++ b/pkg/retrieval/retrieval.go
@@ -431,7 +431,10 @@ func (s *Service) handler(p2pctx context.Context, p p2p.Peer, stream p2p.Stream)
defer func() {
if err != nil {
if !attemptedWrite {
- _ = w.WriteMsgWithContext(ctx, &pb.Delivery{Err: err.Error()})
+ if writeErr := w.WriteMsgWithContext(ctx, &pb.Delivery{Err: err.Error()}); writeErr == nil {
+ _ = stream.FullClose()
+ return
+ }
}
_ = stream.Reset()
} else {
diff --git a/pkg/retrieval/retrieval_integration_test.go b/pkg/retrieval/retrieval_integration_test.go
new file mode 100644
index 00000000000..de37e46018a
--- /dev/null
+++ b/pkg/retrieval/retrieval_integration_test.go
@@ -0,0 +1,153 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package retrieval_test
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "strings"
+ "testing"
+ "time"
+
+ accountingmock "github.com/ethersphere/bee/v2/pkg/accounting/mock"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/libp2ptest"
+ pricermock "github.com/ethersphere/bee/v2/pkg/pricer/mock"
+ "github.com/ethersphere/bee/v2/pkg/retrieval"
+ "github.com/ethersphere/bee/v2/pkg/storage/inmemchunkstore"
+ testingc "github.com/ethersphere/bee/v2/pkg/storage/testing"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ topologymock "github.com/ethersphere/bee/v2/pkg/topology/mock"
+)
+
+// TestRetrievalIntegration is a regression test for the fix of missing
+// FullClose on retrieval streams.
+func TestRetrievalIntegration(t *testing.T) {
+ // Capture logs to check for errors
+ var logMessages libp2ptest.SafeBuffer
+
+ // Enable debug logging
+ logger := log.NewLogger("test",
+ log.WithVerbosity(log.VerbosityDebug),
+ log.WithJSONOutput(),
+ log.WithSink(&logMessages),
+ )
+
+ // Create two libp2p services
+ server, serverAddr := libp2ptest.NewLibp2pService(t, 1, logger.WithValues("libp2p", "server").Build())
+ client, clientAddr := libp2ptest.NewLibp2pService(t, 1, logger.WithValues("libp2p", "client").Build())
+
+ // Set up the chunk to be retrieved
+ chunk := testingc.FixtureChunk("0033")
+
+ // Server setup
+ serverStorer := &testStorer{ChunkStore: inmemchunkstore.New()}
+ err := serverStorer.Put(context.Background(), chunk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ serverRetrieval := retrieval.New(
+ serverAddr,
+ func() (uint8, error) { return swarm.MaxBins, nil },
+ serverStorer,
+ server, // Use libp2p service as streamer
+ topologymock.NewTopologyDriver(), // No peers needed for server in this test
+ logger.WithValues("retrieval", "server").Build(),
+ accountingmock.NewAccounting(),
+ pricermock.NewMockService(10, 10),
+ nil,
+ false,
+ )
+ t.Cleanup(func() { serverRetrieval.Close() })
+
+ // Register retrieval protocol on server
+ if err := server.AddProtocol(serverRetrieval.Protocol()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Client setup
+ clientStorer := &testStorer{ChunkStore: inmemchunkstore.New()}
+
+ // Client needs to know server is the closest peer
+ clientTopology := topologymock.NewTopologyDriver(topologymock.WithPeers(serverAddr))
+
+ clientRetrieval := retrieval.New(
+ clientAddr,
+ func() (uint8, error) { return swarm.MaxBins, nil },
+ clientStorer,
+ client, // Use libp2p service as streamer
+ clientTopology,
+ logger.WithValues("retrieval", "client").Build(),
+ accountingmock.NewAccounting(),
+ pricermock.NewMockService(10, 10),
+ nil,
+ false,
+ )
+ t.Cleanup(func() { clientRetrieval.Close() })
+
+ // Register retrieval protocol on client
+ if err := client.AddProtocol(clientRetrieval.Protocol()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Connect client to server
+ serverAddrs, err := server.Addresses()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Connect(context.Background(), serverAddrs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Retrieve chunk
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ got, err := clientRetrieval.RetrieveChunk(ctx, chunk.Address(), swarm.ZeroAddress)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(got.Data(), chunk.Data()) {
+ t.Fatalf("request and response data not equal. got %s want %s", got.Data(), chunk.Data())
+ }
+
+ // Retrieve non-existing chunk
+ nonExistingChunk := testingc.GenerateTestRandomChunk()
+ _, err = clientRetrieval.RetrieveChunk(ctx, nonExistingChunk.Address(), swarm.ZeroAddress)
+ if !errors.Is(err, topology.ErrNotFound) {
+ t.Fatalf("expected error retrieving non-existing chunk, got %v", err)
+ }
+
+ // Validate that streams are properly closed by the retrieval service
+ // by checking for "stream reset" log entries
+ streamResetLogLine := ""
+ for line := range strings.SplitSeq(logMessages.String(), "\n") {
+ if line == "" {
+ continue
+ }
+ var entry map[string]any
+ if err := json.Unmarshal([]byte(line), &entry); err != nil {
+ t.Fatalf("failed to unmarshal log entry: %v", err)
+ }
+ errorString, ok := entry["error"].(string)
+ if !ok {
+ continue
+ }
+ if strings.Contains(errorString, "stream reset") {
+ streamResetLogLine = line
+ break
+ }
+ }
+ if streamResetLogLine != "" {
+ t.Errorf("found stream reset log line: %s", streamResetLogLine)
+ }
+}
diff --git a/pkg/salud/metrics.go b/pkg/salud/metrics.go
index a99087d219b..8ac5abdb517 100644
--- a/pkg/salud/metrics.go
+++ b/pkg/salud/metrics.go
@@ -19,6 +19,8 @@ type metrics struct {
ReserveSizePercentErr prometheus.Gauge
Healthy prometheus.Counter
Unhealthy prometheus.Counter
+ NeighborhoodAvgDur prometheus.Gauge
+ NeighborCount prometheus.Gauge
}
func newMetrics() metrics {
@@ -79,6 +81,19 @@ func newMetrics() metrics {
Name: "reserve_size_percentage_err",
Help: "Percentage error of the reservesize relative to the network average.",
}),
+ // Neighborhood-specific metrics
+ NeighborhoodAvgDur: prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: m.Namespace,
+ Subsystem: subsystem,
+ Name: "neighborhood_dur",
+ Help: "Average duration for snapshot response from neighborhood peers.",
+ }),
+ NeighborCount: prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: m.Namespace,
+ Subsystem: subsystem,
+ Name: "neighbors",
+ Help: "Number of neighborhood peers.",
+ }),
}
}
diff --git a/pkg/salud/salud.go b/pkg/salud/salud.go
index 8a9937adee2..1cf4ffa8373 100644
--- a/pkg/salud/salud.go
+++ b/pkg/salud/salud.go
@@ -29,7 +29,6 @@ const (
initialBackoffDelay = 10 * time.Second
maxBackoffDelay = 5 * time.Minute
backoffFactor = 2
- DefaultMinPeersPerBin = 4
DefaultDurPercentile = 0.4 // consider 40% as healthy, lower percentile = stricter duration check
DefaultConnsPercentile = 0.8 // consider 80% as healthy, lower percentile = stricter conns check
)
@@ -64,7 +63,6 @@ func New(
logger log.Logger,
startupStabilizer stabilization.Subscriber,
mode string,
- minPeersPerbin int,
durPercentile float64,
connsPercentile float64,
) *service {
@@ -81,12 +79,12 @@ func New(
}
s.wg.Add(1)
- go s.worker(startupStabilizer, mode, minPeersPerbin, durPercentile, connsPercentile)
+ go s.worker(startupStabilizer, mode, durPercentile, connsPercentile)
return s
}
-func (s *service) worker(startupStabilizer stabilization.Subscriber, mode string, minPeersPerbin int, durPercentile float64, connsPercentile float64) {
+func (s *service) worker(startupStabilizer stabilization.Subscriber, mode string, durPercentile float64, connsPercentile float64) {
defer s.wg.Done()
sub, unsubscribe := startupStabilizer.Subscribe()
@@ -102,7 +100,7 @@ func (s *service) worker(startupStabilizer stabilization.Subscriber, mode string
currentDelay := initialBackoffDelay
for {
- s.salud(mode, minPeersPerbin, durPercentile, connsPercentile)
+ s.salud(mode, durPercentile, connsPercentile)
select {
case <-s.quit:
@@ -134,19 +132,18 @@ type peer struct {
// salud acquires the status snapshot of every peer and computes an nth percentile of response duration and connected
// per count, the most common storage radius, and the batch commitment, and based on these values, marks peers as unhealhy that fall beyond
// the allowed thresholds.
-func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64, connsPercentile float64) {
+func (s *service) salud(mode string, durPercentile float64, connsPercentile float64) {
var (
- mtx sync.Mutex
- wg sync.WaitGroup
- totaldur float64
- peers []peer
- bins [swarm.MaxBins]int
+ mtx sync.Mutex
+ wg sync.WaitGroup
+ totaldur float64
+ peers []peer
+ neighborhoodPeers uint
+ neighborhoodTotalDur float64
)
err := s.topology.EachConnectedPeer(func(addr swarm.Address, bin uint8) (stop bool, jumpToNext bool, err error) {
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
defer cancel()
@@ -165,11 +162,15 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
}
mtx.Lock()
- bins[bin]++
totaldur += dur.Seconds()
- peers = append(peers, peer{snapshot, dur, addr, bin, s.reserve.IsWithinStorageRadius(addr)})
+ peer := peer{snapshot, dur, addr, bin, s.reserve.IsWithinStorageRadius(addr)}
+ peers = append(peers, peer)
+ if peer.neighbor {
+ neighborhoodPeers++
+ neighborhoodTotalDur += dur.Seconds()
+ }
mtx.Unlock()
- }()
+ })
return false, false, nil
}, topology.Select{})
if err != nil {
@@ -188,6 +189,20 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
pConns := percentileConns(peers, connsPercentile)
commitment := commitment(peers)
+ if neighborhoodPeers > 0 {
+ neighborhoodAvgDur := neighborhoodTotalDur / float64(neighborhoodPeers)
+
+ s.metrics.NeighborhoodAvgDur.Set(neighborhoodAvgDur)
+ s.metrics.NeighborCount.Set(float64(neighborhoodPeers))
+
+ s.logger.Debug("neighborhood metrics", "avg_dur", neighborhoodAvgDur, "count", neighborhoodPeers)
+ } else {
+ s.metrics.NeighborhoodAvgDur.Set(0)
+ s.metrics.NeighborCount.Set(0)
+
+ s.logger.Debug("no neighborhood peers found for metrics")
+ }
+
s.metrics.AvgDur.Set(avgDur)
s.metrics.PDur.Set(pDur)
s.metrics.PConns.Set(float64(pConns))
@@ -195,7 +210,7 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
s.metrics.NeighborhoodRadius.Set(float64(nHoodRadius))
s.metrics.Commitment.Set(float64(commitment))
- s.logger.Debug("computed", "avg_dur", avgDur, "pDur", pDur, "pConns", pConns, "network_radius", networkRadius, "neighborhood_radius", nHoodRadius, "batch_commitment", commitment)
+ s.logger.Debug("computed", "avg_dur", avgDur, "pDur", pDur, "pConns", pConns, "network_radius", networkRadius, "neighborhood_radius", nHoodRadius, "batch_commitment", commitment, "neighborhood_peers", neighborhoodPeers)
// sort peers by duration, highest first to give priority to the fastest peers
sort.Slice(peers, func(i, j int) bool {
@@ -206,17 +221,10 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
var healthy bool
- // every bin should have at least some peers, healthy or not
- if bins[peer.bin] <= minPeersPerbin {
- s.metrics.Healthy.Inc()
- s.topology.UpdatePeerHealth(peer.addr, true, peer.dur)
- continue
- }
-
if networkRadius > 0 && peer.status.CommittedDepth < uint32(networkRadius-2) {
s.logger.Debug("radius health failure", "radius", peer.status.CommittedDepth, "peer_address", peer.addr, "bin", peer.bin)
} else if peer.dur.Seconds() > pDur {
- s.logger.Debug("response duration below threshold", "duration", peer.dur, "peer_address", peer.addr, "bin", peer.bin)
+ s.logger.Debug("response duration above threshold", "duration", peer.dur, "peer_address", peer.addr, "bin", peer.bin)
} else if peer.status.ConnectedPeers < pConns {
s.logger.Debug("connections count below threshold", "connections", peer.status.ConnectedPeers, "peer_address", peer.addr, "bin", peer.bin)
} else if peer.status.BatchCommitment != commitment {
@@ -230,7 +238,6 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
s.metrics.Healthy.Inc()
} else {
s.metrics.Unhealthy.Inc()
- bins[peer.bin]--
}
}
diff --git a/pkg/salud/salud_test.go b/pkg/salud/salud_test.go
index 3f40d54ad2e..4123702263e 100644
--- a/pkg/salud/salud_test.go
+++ b/pkg/salud/salud_test.go
@@ -72,7 +72,7 @@ func TestSalud(t *testing.T) {
mockstorer.WithCapacityDoubling(2),
)
- service := salud.New(statusM, topM, reserve, log.Noop, stabilmock.NewSubscriber(true), "full", 0, 0.8, 0.8)
+ service := salud.New(statusM, topM, reserve, log.Noop, stabilmock.NewSubscriber(true), "full", 0.8, 0.8)
err := spinlock.Wait(time.Minute, func() bool {
return len(topM.PeersHealth()) == len(peers)
@@ -119,7 +119,7 @@ func TestSelfUnhealthyRadius(t *testing.T) {
mockstorer.WithCapacityDoubling(0),
)
- service := salud.New(statusM, topM, reserve, log.Noop, stabilmock.NewSubscriber(true), "full", 0, 0.8, 0.8)
+ service := salud.New(statusM, topM, reserve, log.Noop, stabilmock.NewSubscriber(true), "full", 0.8, 0.8)
testutil.CleanupCloser(t, service)
err := spinlock.Wait(time.Minute, func() bool {
@@ -157,7 +157,7 @@ func TestSelfHealthyCapacityDoubling(t *testing.T) {
mockstorer.WithCapacityDoubling(2),
)
- service := salud.New(statusM, topM, reserve, log.Noop, stabilmock.NewSubscriber(true), "full", 0, 0.8, 0.8)
+ service := salud.New(statusM, topM, reserve, log.Noop, stabilmock.NewSubscriber(true), "full", 0.8, 0.8)
testutil.CleanupCloser(t, service)
err := spinlock.Wait(time.Minute, func() bool {
@@ -187,7 +187,7 @@ func TestSubToRadius(t *testing.T) {
topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...))
- service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, stabilmock.NewSubscriber(true), "full", 0, 0.8, 0.8)
+ service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, stabilmock.NewSubscriber(true), "full", 0.8, 0.8)
c, unsub := service.SubscribeNetworkStorageRadius()
t.Cleanup(unsub)
@@ -220,7 +220,7 @@ func TestUnsub(t *testing.T) {
topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...))
- service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, stabilmock.NewSubscriber(true), "full", 0, 0.8, 0.8)
+ service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, stabilmock.NewSubscriber(true), "full", 0.8, 0.8)
testutil.CleanupCloser(t, service)
c, unsub := service.SubscribeNetworkStorageRadius()
diff --git a/pkg/settlement/swap/chequebook/factory.go b/pkg/settlement/swap/chequebook/factory.go
index 53457679d2e..f548643bac1 100644
--- a/pkg/settlement/swap/chequebook/factory.go
+++ b/pkg/settlement/swap/chequebook/factory.go
@@ -9,13 +9,13 @@ import (
"fmt"
"math/big"
+ "context"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/bee/v2/pkg/sctx"
"github.com/ethersphere/bee/v2/pkg/transaction"
"github.com/ethersphere/bee/v2/pkg/util/abiutil"
"github.com/ethersphere/go-sw3-abi/sw3abi"
- "golang.org/x/net/context"
)
var (
diff --git a/pkg/settlement/swap/chequebook/init.go b/pkg/settlement/swap/chequebook/init.go
index a89553e0f1c..8c6b1563fae 100644
--- a/pkg/settlement/swap/chequebook/init.go
+++ b/pkg/settlement/swap/chequebook/init.go
@@ -60,12 +60,12 @@ func checkBalance(
minimumEth := big.NewInt(0)
if gasPrice == nil {
- gasPrice, err = swapBackend.SuggestGasPrice(timeoutCtx)
+ gasPrice, _, err = swapBackend.SuggestedFeeAndTip(timeoutCtx, gasPrice, 0)
if err != nil {
return err
}
- minimumEth = gasPrice.Mul(gasPrice, big.NewInt(250000))
+ minimumEth = new(big.Int).Mul(gasPrice, big.NewInt(250000))
}
insufficientERC20 := erc20Balance.Cmp(swapInitialDeposit) < 0
@@ -98,9 +98,16 @@ func checkBalance(
msg := fmt.Sprintf("cannot continue until there is at least min %s available on address", swarmTokenName)
logger.Warning(msg, "min_amount", neededERC20, "address", overlayEthAddress)
}
+
if chainId == chaincfg.Testnet.ChainID {
logger.Warning("learn how to fund your node by visiting our docs at https://docs.ethswarm.org/docs/installation/fund-your-node")
}
+
+ if chainId == chaincfg.Mainnet.ChainID {
+ fundingURL := fmt.Sprintf("https://fund.ethswarm.org/?destination=%s&intent=initial-funding", overlayEthAddress.Hex())
+ logger.Info(fmt.Sprintf("fund your node using the funding URL: %s", fundingURL))
+ }
+
select {
case <-time.After(balanceCheckBackoffDuration):
case <-timeoutCtx.Done():
diff --git a/pkg/settlement/swap/swapprotocol/swapprotocol.go b/pkg/settlement/swap/swapprotocol/swapprotocol.go
index 45715621208..5153a307bc3 100644
--- a/pkg/settlement/swap/swapprotocol/swapprotocol.go
+++ b/pkg/settlement/swap/swapprotocol/swapprotocol.go
@@ -126,7 +126,7 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
exchangeRate, deduction, err := swap.ParseSettlementResponseHeaders(responseHeaders)
if err != nil {
if !errors.Is(err, swap.ErrNoDeductionHeader) {
- return err
+ return fmt.Errorf("parse settlement response headers: %w", err)
}
deduction = big.NewInt(0)
}
@@ -134,11 +134,15 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
var signedCheque *chequebook.SignedCheque
err = json.Unmarshal(req.Cheque, &signedCheque)
if err != nil {
- return err
+ return fmt.Errorf("unmarshal cheque: %w", err)
}
// signature validation
- return s.swap.ReceiveCheque(ctx, p.Address, signedCheque, exchangeRate, deduction)
+ if err := s.swap.ReceiveCheque(ctx, p.Address, signedCheque, exchangeRate, deduction); err != nil {
+ return fmt.Errorf("receive cheque: %w", err)
+ }
+
+ return nil
}
func (s *Service) headler(receivedHeaders p2p.Headers, peerAddress swarm.Address) (returnHeaders p2p.Headers) {
@@ -163,14 +167,12 @@ func (s *Service) headler(receivedHeaders p2p.Headers, peerAddress swarm.Address
// InitiateCheque attempts to send a cheque to a peer.
func (s *Service) EmitCheque(ctx context.Context, peer swarm.Address, beneficiary common.Address, amount *big.Int, issue IssueFunc) (balance *big.Int, err error) {
- loggerV1 := s.logger.V(1).Register()
-
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("new stream: %w", err)
}
defer func() {
if err != nil {
@@ -185,7 +187,7 @@ func (s *Service) EmitCheque(ctx context.Context, peer swarm.Address, beneficiar
exchangeRate, deduction, err := swap.ParseSettlementResponseHeaders(returnedHeaders)
if err != nil {
if !errors.Is(err, swap.ErrNoDeductionHeader) {
- return nil, err
+ return nil, fmt.Errorf("parse settlement response headers: %w", err)
}
deduction = big.NewInt(0)
}
@@ -195,7 +197,7 @@ func (s *Service) EmitCheque(ctx context.Context, peer swarm.Address, beneficiar
// get whether peer have deducted in the past
checkPeer, err := s.swap.GetDeductionByPeer(peer)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("get deduction by peer: %w", err)
}
// if peer is not entitled for deduction but sent non zero deduction value, return with error
@@ -206,7 +208,7 @@ func (s *Service) EmitCheque(ctx context.Context, peer swarm.Address, beneficiar
// get current global exchangeRate rate and deduction
checkExchangeRate, checkDeduction, err := s.priceOracle.CurrentRates()
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("get current rates: %w", err)
}
// exchangeRate rates should match
@@ -232,7 +234,7 @@ func (s *Service) EmitCheque(ctx context.Context, peer swarm.Address, beneficiar
}
// sending cheque
- loggerV1.Debug("sending cheque message to peer", "peer_address", peer, "cheque", cheque)
+ s.logger.Debug("sending cheque message to peer", "peer_address", peer, "cheque", cheque)
w := protobuf.NewWriter(stream)
return w.WriteMsgWithContext(ctx, &pb.EmitCheque{
@@ -241,13 +243,13 @@ func (s *Service) EmitCheque(ctx context.Context, peer swarm.Address, beneficiar
})
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("call issue function: %w", err)
}
if deduction.Cmp(big.NewInt(0)) != 0 {
err = s.swap.AddDeductionByPeer(peer)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("add deduction for peer: %w", err)
}
}
diff --git a/pkg/sharky/recovery.go b/pkg/sharky/recovery.go
index 2b19ee93935..af064fda60a 100644
--- a/pkg/sharky/recovery.go
+++ b/pkg/sharky/recovery.go
@@ -30,7 +30,7 @@ func NewRecovery(dir string, shardCnt int, datasize int) (*Recovery, error) {
shards := make([]*slots, shardCnt)
shardFiles := make([]*os.File, shardCnt)
- for i := 0; i < shardCnt; i++ {
+ for i := range shardCnt {
file, err := os.OpenFile(path.Join(dir, fmt.Sprintf("shard_%03d", i)), os.O_RDWR, 0666)
if errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("index %d: %w", i, ErrShardNotFound)
diff --git a/pkg/sharky/recovery_test.go b/pkg/sharky/recovery_test.go
index 1cc7ae3d55e..73571c5811e 100644
--- a/pkg/sharky/recovery_test.go
+++ b/pkg/sharky/recovery_test.go
@@ -114,7 +114,7 @@ func TestRecovery(t *testing.T) {
defer cancel()
runs := 96
- for i := 0; i < runs; i++ {
+ for range runs {
loc, err := s.Write(cctx, payload)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
diff --git a/pkg/sharky/shard.go b/pkg/sharky/shard.go
index 5073d63bd87..c9047c979a2 100644
--- a/pkg/sharky/shard.go
+++ b/pkg/sharky/shard.go
@@ -102,11 +102,9 @@ func (sh *shard) process() {
defer func() {
// this condition checks if an slot is in limbo (popped but not used for write op)
if writes != nil {
- sh.slots.limboWG.Add(1)
- go func() {
- defer sh.slots.limboWG.Done()
+ sh.slots.limboWG.Go(func() {
sh.slots.in <- slot
- }()
+ })
}
}()
free := sh.slots.out
diff --git a/pkg/sharky/shard_slots_test.go b/pkg/sharky/shard_slots_test.go
index 0a1ab02cf7e..7738a1595aa 100644
--- a/pkg/sharky/shard_slots_test.go
+++ b/pkg/sharky/shard_slots_test.go
@@ -156,18 +156,14 @@ func newShard(t *testing.T) *shard {
terminated := make(chan struct{})
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
shard.process()
close(terminated)
- }()
+ })
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
slots.process(terminated)
- }()
+ })
return shard
}
diff --git a/pkg/sharky/sharky_test.go b/pkg/sharky/sharky_test.go
index 268a996e7c0..1d4b78d51c4 100644
--- a/pkg/sharky/sharky_test.go
+++ b/pkg/sharky/sharky_test.go
@@ -185,11 +185,11 @@ func TestConcurrency(t *testing.T) {
ctx := context.Background()
eg, ectx := errgroup.WithContext(ctx)
// a number of workers write sequential numbers to sharky
- for k := 0; k < workers; k++ {
+ for k := range workers {
eg.Go(func() error {
<-start
buf := make([]byte, 4)
- for i := 0; i < limit; i++ {
+ for i := range limit {
j := i*workers + k
binary.BigEndian.PutUint32(buf, uint32(j))
loc, err := s.Write(ctx, buf)
@@ -212,7 +212,7 @@ func TestConcurrency(t *testing.T) {
eg.Go(func() error {
<-start
buf := make([]byte, datasize)
- for i := 0; i < limit; i++ {
+ for range limit {
select {
case <-ectx.Done():
return ectx.Err()
diff --git a/pkg/sharky/slots.go b/pkg/sharky/slots.go
index 1f5294f79b6..71d058f2b88 100644
--- a/pkg/sharky/slots.go
+++ b/pkg/sharky/slots.go
@@ -58,7 +58,7 @@ func (sl *slots) save() error {
// extensions are bytewise: can only be multiples of 8 bits
func (sl *slots) extend(n int) {
sl.size += uint32(n) * 8
- for i := 0; i < n; i++ {
+ for range n {
sl.data = append(sl.data, 0xff)
}
}
@@ -123,12 +123,10 @@ func (sl *slots) process(quit chan struct{}) {
out = nil
}
quit = nil
- sl.wg.Add(1)
- go func() {
- defer sl.wg.Done()
+ sl.wg.Go(func() {
sl.limboWG.Wait()
close(sl.in)
- }()
+ })
}
}
}
diff --git a/pkg/sharky/store.go b/pkg/sharky/store.go
index 59549ee4a4a..d8947f9d042 100644
--- a/pkg/sharky/store.go
+++ b/pkg/sharky/store.go
@@ -100,17 +100,13 @@ func (s *Store) create(index uint8, maxDataSize int, basedir fs.FS) (*shard, err
quit: s.quit,
}
terminated := make(chan struct{})
- sh.slots.wg.Add(1)
- go func() {
- defer sh.slots.wg.Done()
+ s.wg.Go(func() {
sh.process()
close(terminated)
- }()
- sh.slots.wg.Add(1)
- go func() {
- defer sh.slots.wg.Done()
+ })
+ s.wg.Go(func() {
sl.process(terminated)
- }()
+ })
return sh, nil
}
diff --git a/pkg/shed/example_store_test.go b/pkg/shed/example_store_test.go
index 6d77ce907a7..a6adbb2c703 100644
--- a/pkg/shed/example_store_test.go
+++ b/pkg/shed/example_store_test.go
@@ -238,7 +238,7 @@ func (s *Store) CollectGarbage() (err error) {
maxRounds := 10 // arbitrary number, needs to be calculated
// Run a few gc rounds.
- for roundCount := 0; roundCount < maxRounds; roundCount++ {
+ for range maxRounds {
var garbageCount int
// New batch for a new cg round.
trash := new(leveldb.Batch)
diff --git a/pkg/shed/field_struct.go b/pkg/shed/field_struct.go
index 4f909c17b21..9e6f7139800 100644
--- a/pkg/shed/field_struct.go
+++ b/pkg/shed/field_struct.go
@@ -45,7 +45,7 @@ func (db *DB) NewStructField(name string) (f StructField, err error) {
// Get unmarshals data from the database to a provided val.
// If the data is not found leveldb.ErrNotFound is returned.
-func (f StructField) Get(val interface{}) (err error) {
+func (f StructField) Get(val any) (err error) {
b, err := f.db.Get(f.key)
if err != nil {
return err
@@ -54,7 +54,7 @@ func (f StructField) Get(val interface{}) (err error) {
}
// Put marshals provided val and saves it to the database.
-func (f StructField) Put(val interface{}) (err error) {
+func (f StructField) Put(val any) (err error) {
b, err := json.Marshal(val)
if err != nil {
return err
@@ -63,7 +63,7 @@ func (f StructField) Put(val interface{}) (err error) {
}
// PutInBatch marshals provided val and puts it into the batch.
-func (f StructField) PutInBatch(batch *leveldb.Batch, val interface{}) (err error) {
+func (f StructField) PutInBatch(batch *leveldb.Batch, val any) (err error) {
b, err := json.Marshal(val)
if err != nil {
return err
diff --git a/pkg/shed/vector_uint64_test.go b/pkg/shed/vector_uint64_test.go
index 2d602a034c0..4eeaf1cdaf3 100644
--- a/pkg/shed/vector_uint64_test.go
+++ b/pkg/shed/vector_uint64_test.go
@@ -56,7 +56,7 @@ func TestUint64Vector(t *testing.T) {
}
for _, index := range []uint64{0, 1, 2, 5, 100} {
- var want uint64 = 42 + index
+ var want = 42 + index
err = bins.Put(index, want)
if err != nil {
t.Fatal(err)
@@ -70,7 +70,7 @@ func TestUint64Vector(t *testing.T) {
}
t.Run("overwrite", func(t *testing.T) {
- var want uint64 = 84 + index
+ var want = 84 + index
err = bins.Put(index, want)
if err != nil {
t.Fatal(err)
@@ -97,7 +97,7 @@ func TestUint64Vector(t *testing.T) {
for _, index := range []uint64{0, 1, 2, 3, 5, 10} {
batch := new(leveldb.Batch)
- var want uint64 = 43 + index
+ var want = 43 + index
bins.PutInBatch(batch, index, want)
err = db.WriteBatch(batch)
if err != nil {
@@ -113,7 +113,7 @@ func TestUint64Vector(t *testing.T) {
t.Run("overwrite", func(t *testing.T) {
batch := new(leveldb.Batch)
- var want uint64 = 85 + index
+ var want = 85 + index
bins.PutInBatch(batch, index, want)
err = db.WriteBatch(batch)
if err != nil {
diff --git a/pkg/spinlock/wait_test.go b/pkg/spinlock/wait_test.go
index 090543723b1..3091da15ddb 100644
--- a/pkg/spinlock/wait_test.go
+++ b/pkg/spinlock/wait_test.go
@@ -7,37 +7,37 @@ package spinlock_test
import (
"errors"
"testing"
+ "testing/synctest"
"time"
"github.com/ethersphere/bee/v2/pkg/spinlock"
)
func TestWait(t *testing.T) {
- t.Parallel()
t.Run("timed out", func(t *testing.T) {
- t.Parallel()
-
- err := spinlock.Wait(time.Millisecond*20, func() bool { return false })
- if !errors.Is(err, spinlock.ErrTimedOut) {
- t.Fatal("expecting to time out")
- }
+ synctest.Test(t, func(t *testing.T) {
+ err := spinlock.Wait(time.Millisecond*20, func() bool { return false })
+ if !errors.Is(err, spinlock.ErrTimedOut) {
+ t.Fatal("expecting to time out")
+ }
+ })
})
t.Run("condition satisfied", func(t *testing.T) {
- t.Parallel()
-
- spinStartTime := time.Now()
- condCallCount := 0
- err := spinlock.Wait(time.Millisecond*200, func() bool {
- condCallCount++
- return time.Since(spinStartTime) >= time.Millisecond*100
+ synctest.Test(t, func(t *testing.T) {
+ spinStartTime := time.Now()
+ condCallCount := 0
+ err := spinlock.Wait(time.Millisecond*200, func() bool {
+ condCallCount++
+ return time.Since(spinStartTime) >= time.Millisecond*100
+ })
+ if err != nil {
+ t.Fatal("expecting to end wait without time out")
+ }
+ if condCallCount == 0 {
+ t.Fatal("expecting condition function to be called")
+ }
})
- if err != nil {
- t.Fatal("expecting to end wait without time out")
- }
- if condCallCount == 0 {
- t.Fatal("expecting condition function to be called")
- }
})
}
diff --git a/pkg/statestore/leveldb/leveldb.go b/pkg/statestore/leveldb/leveldb.go
index 08d757ce288..8ddd71d0215 100644
--- a/pkg/statestore/leveldb/leveldb.go
+++ b/pkg/statestore/leveldb/leveldb.go
@@ -76,7 +76,7 @@ func NewStateStore(path string, l log.Logger) (*Store, error) {
// Get retrieves a value of the requested key. If no results are found,
// storage.ErrNotFound will be returned.
-func (s *Store) Get(key string, i interface{}) error {
+func (s *Store) Get(key string, i any) error {
data, err := s.db.Get([]byte(key), nil)
if err != nil {
if errors.Is(err, leveldb.ErrNotFound) {
@@ -95,7 +95,7 @@ func (s *Store) Get(key string, i interface{}) error {
// Put stores a value for an arbitrary key. BinaryMarshaler
// interface method will be called on the provided value
// with fallback to JSON serialization.
-func (s *Store) Put(key string, i interface{}) (err error) {
+func (s *Store) Put(key string, i any) (err error) {
var bytes []byte
if marshaler, ok := i.(encoding.BinaryMarshaler); ok {
if bytes, err = marshaler.MarshalBinary(); err != nil {
diff --git a/pkg/statestore/mock/store.go b/pkg/statestore/mock/store.go
index a3d34542f2c..7c30fca12b5 100644
--- a/pkg/statestore/mock/store.go
+++ b/pkg/statestore/mock/store.go
@@ -28,7 +28,7 @@ func NewStateStore() storage.StateStorer {
return s
}
-func (s *store) Get(key string, i interface{}) (err error) {
+func (s *store) Get(key string, i any) (err error) {
s.mtx.RLock()
defer s.mtx.RUnlock()
@@ -44,7 +44,7 @@ func (s *store) Get(key string, i interface{}) (err error) {
return json.Unmarshal(data, i)
}
-func (s *store) Put(key string, i interface{}) (err error) {
+func (s *store) Put(key string, i any) (err error) {
s.mtx.Lock()
defer s.mtx.Unlock()
diff --git a/pkg/statestore/storeadapter/storeadapter.go b/pkg/statestore/storeadapter/storeadapter.go
index 2f05f1f31c7..95e775bffb2 100644
--- a/pkg/statestore/storeadapter/storeadapter.go
+++ b/pkg/statestore/storeadapter/storeadapter.go
@@ -25,7 +25,7 @@ var _ storage.Item = (*proxyItem)(nil)
type proxyItem struct {
ns string
key string
- obj interface{}
+ obj any
}
// ID implements Item interface.
@@ -91,7 +91,7 @@ func (pi proxyItem) String() string {
}
// newProxyItem creates a new proxyItem.
-func newProxyItem(key string, obj interface{}) *proxyItem {
+func newProxyItem(key string, obj any) *proxyItem {
return &proxyItem{ns: stateStoreNamespace, key: key, obj: obj}
}
@@ -105,11 +105,11 @@ type rawItem struct {
// Marshal implements Item interface.
func (ri *rawItem) Marshal() ([]byte, error) {
- if ri == nil || ri.proxyItem == nil || ri.proxyItem.obj == nil {
+ if ri == nil || ri.proxyItem == nil || ri.obj == nil {
return nil, nil
}
- if buf, ok := ri.proxyItem.obj.([]byte); ok {
+ if buf, ok := ri.obj.([]byte); ok {
return buf, nil
}
@@ -118,12 +118,12 @@ func (ri *rawItem) Marshal() ([]byte, error) {
// Unmarshal implements Item interface.
func (ri *rawItem) Unmarshal(data []byte) error {
- if ri == nil || ri.proxyItem == nil || ri.proxyItem.obj == nil || len(data) == 0 {
+ if ri == nil || ri.proxyItem == nil || ri.obj == nil || len(data) == 0 {
return nil
}
- if buf, ok := ri.proxyItem.obj.([]byte); ok {
- ri.proxyItem.obj = append(buf[:0], data...)
+ if buf, ok := ri.obj.([]byte); ok {
+ ri.obj = append(buf[:0], data...)
return nil
}
@@ -146,12 +146,12 @@ func (s *StateStorerAdapter) Close() error {
}
// Get implements StateStorer interface.
-func (s *StateStorerAdapter) Get(key string, obj interface{}) (err error) {
+func (s *StateStorerAdapter) Get(key string, obj any) (err error) {
return s.storage.Get(newProxyItem(key, obj))
}
// Put implements StateStorer interface.
-func (s *StateStorerAdapter) Put(key string, obj interface{}) (err error) {
+func (s *StateStorerAdapter) Put(key string, obj any) (err error) {
return s.storage.Put(newProxyItem(key, obj))
}
diff --git a/pkg/statestore/test/store.go b/pkg/statestore/test/store.go
index 10bc9c15c83..48cb487edfe 100644
--- a/pkg/statestore/test/store.go
+++ b/pkg/statestore/test/store.go
@@ -159,7 +159,7 @@ func insertValues(t *testing.T, store storage.StateStorer, key1, key2 string, va
func insert(t *testing.T, store storage.StateStorer, prefix string, count int) {
t.Helper()
- for i := 0; i < count; i++ {
+ for i := range count {
k := prefix + fmt.Sprint(i)
err := store.Put(k, i)
diff --git a/pkg/status/status_test.go b/pkg/status/status_test.go
index 09213ec71d4..2b3f9383fe3 100644
--- a/pkg/status/status_test.go
+++ b/pkg/status/status_test.go
@@ -249,12 +249,12 @@ type statusSnapshotMock struct {
*pb.Snapshot
}
-func (m *statusSnapshotMock) SyncRate() float64 { return m.Snapshot.PullsyncRate }
+func (m *statusSnapshotMock) SyncRate() float64 { return m.PullsyncRate }
func (m *statusSnapshotMock) ReserveSize() int { return int(m.Snapshot.ReserveSize) }
func (m *statusSnapshotMock) StorageRadius() uint8 { return uint8(m.Snapshot.StorageRadius) }
-func (m *statusSnapshotMock) Commitment() (uint64, error) { return m.Snapshot.BatchCommitment, nil }
+func (m *statusSnapshotMock) Commitment() (uint64, error) { return m.BatchCommitment, nil }
func (m *statusSnapshotMock) GetChainState() *postage.ChainState {
- return &postage.ChainState{Block: m.Snapshot.LastSyncedBlock}
+ return &postage.ChainState{Block: m.LastSyncedBlock}
}
func (m *statusSnapshotMock) ReserveSizeWithinRadius() uint64 {
return m.Snapshot.ReserveSizeWithinRadius
diff --git a/pkg/storage/inmemstore/inmemstore.go b/pkg/storage/inmemstore/inmemstore.go
index b0052602aa1..88076df2722 100644
--- a/pkg/storage/inmemstore/inmemstore.go
+++ b/pkg/storage/inmemstore/inmemstore.go
@@ -104,7 +104,7 @@ func (s *Store) Count(k storage.Key) (int, error) {
defer s.mu.RUnlock()
count := 0
- s.st.WalkPrefix(k.Namespace(), func(_ string, _ interface{}) bool {
+ s.st.WalkPrefix(k.Namespace(), func(_ string, _ any) bool {
count++
return false
})
@@ -116,7 +116,7 @@ func (s *Store) Iterate(q storage.Query, fn storage.IterateFn) error {
return fmt.Errorf("failed iteration: %w", err)
}
- getNext := func(k string, v interface{}) (*storage.Result, error) {
+ getNext := func(k string, v any) (*storage.Result, error) {
for _, filter := range q.Filters {
if filter(idFromKey(k, q.Factory().Namespace()), v.([]byte)) {
return nil, nil
@@ -155,7 +155,7 @@ func (s *Store) Iterate(q storage.Query, fn storage.IterateFn) error {
switch q.Order {
case storage.KeyAscendingOrder:
- s.st.WalkPrefix(prefix, func(k string, v interface{}) bool {
+ s.st.WalkPrefix(prefix, func(k string, v any) bool {
if q.PrefixAtStart && !skipUntil {
if k >= prefix+separator+q.Prefix {
@@ -195,7 +195,7 @@ func (s *Store) Iterate(q storage.Query, fn storage.IterateFn) error {
// For now, inmem implementation is not meant to work for large datasets, so first option
// is chosen.
results := make([]storage.Result, 0)
- s.st.WalkPrefix(prefix, func(k string, v interface{}) bool {
+ s.st.WalkPrefix(prefix, func(k string, v any) bool {
res, err := getNext(k, v)
if err != nil {
retErr = errors.Join(retErr, err)
diff --git a/pkg/storage/migration/index_test.go b/pkg/storage/migration/index_test.go
index ec35859d01f..2247c36ebef 100644
--- a/pkg/storage/migration/index_test.go
+++ b/pkg/storage/migration/index_test.go
@@ -326,7 +326,7 @@ func TestOptions(t *testing.T) {
func populateStore(t *testing.T, s storage.Store, count int) {
t.Helper()
- for i := 0; i < count; i++ {
+ for i := range count {
item := &obj{id: i, val: i}
if err := s.Put(item); err != nil {
t.Fatalf("populate store should succeed: %v", err)
diff --git a/pkg/storage/migration/steps_chain_test.go b/pkg/storage/migration/steps_chain_test.go
index e5a69917f65..327a41331d7 100644
--- a/pkg/storage/migration/steps_chain_test.go
+++ b/pkg/storage/migration/steps_chain_test.go
@@ -22,7 +22,7 @@ func TestNewStepsChain(t *testing.T) {
stepsFn := make([]migration.StepFn, 0)
// Create 10 step functions where each would remove single element, having value [0-10)
- for i := 0; i < 10; i++ {
+ for i := range 10 {
valForRemoval := i
var stepFn migration.StepFn
diff --git a/pkg/storage/statestore.go b/pkg/storage/statestore.go
index 53520dea1be..b04d36d3551 100644
--- a/pkg/storage/statestore.go
+++ b/pkg/storage/statestore.go
@@ -16,10 +16,10 @@ type StateStorer interface {
io.Closer
// Get unmarshalls object with the given key into the given obj.
- Get(key string, obj interface{}) error
+ Get(key string, obj any) error
// Put inserts or updates the given obj stored under the given key.
- Put(key string, obj interface{}) error
+ Put(key string, obj any) error
// Delete removes object form the store stored under the given key.
Delete(key string) error
diff --git a/pkg/storage/storagetest/benchmark.go b/pkg/storage/storagetest/benchmark.go
index ea01d58121c..8d051a06391 100644
--- a/pkg/storage/storagetest/benchmark.go
+++ b/pkg/storage/storagetest/benchmark.go
@@ -37,7 +37,7 @@ const (
func randomBytes(r *rand.Rand, n int) []byte {
b := make([]byte, n)
- for i := 0; i < n; i++ {
+ for i := range n {
b[i] = ' ' + byte(r.Intn('~'-' '+1))
}
return b
@@ -104,7 +104,7 @@ func newStartAtEntryGenerator(start int, g entryGenerator) entryGenerator {
func newSequentialKeys(size int, start int, keyFormat string) [][]byte {
keys := make([][]byte, size)
buffer := make([]byte, size*keyLen)
- for i := 0; i < size; i++ {
+ for i := range size {
begin, end := i*keyLen, (i+1)*keyLen
key := buffer[begin:begin:end]
_, _ = fmt.Fprintf(bytes.NewBuffer(key), keyFormat, start+i)
@@ -117,7 +117,7 @@ func newRandomKeys(n int, format string) [][]byte {
r := rand.New(rand.NewSource(time.Now().Unix()))
keys := make([][]byte, n)
buffer := make([]byte, n*keyLen)
- for i := 0; i < n; i++ {
+ for i := range n {
begin, end := i*keyLen, (i+1)*keyLen
key := buffer[begin:begin:end]
_, _ = fmt.Fprintf(bytes.NewBuffer(key), format, r.Intn(n))
@@ -129,7 +129,7 @@ func newRandomKeys(n int, format string) [][]byte {
func newFullRandomKeys(size int, start int, format string) [][]byte {
keys := newSequentialKeys(size, start, format)
r := rand.New(rand.NewSource(time.Now().Unix()))
- for i := 0; i < size; i++ {
+ for i := range size {
j := r.Intn(size)
keys[i], keys[j] = keys[j], keys[i]
}
@@ -227,7 +227,7 @@ func maxInt(a int, b int) int {
func doRead(b *testing.B, db storage.Store, g keyGenerator, allowNotFound bool) {
b.Helper()
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
key := g.Key(i)
item := &obj1{
Id: string(key),
@@ -269,7 +269,7 @@ func doWrite(b *testing.B, db storage.Store, g entryGenerator) {
b.Helper()
w := newDBWriter(db)
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
if err := w.Put(g.Key(i), g.Value(i)); err != nil {
b.Fatalf("write key '%s': %v", string(g.Key(i)), err)
}
@@ -280,7 +280,7 @@ func doDelete(b *testing.B, db storage.Store, g keyGenerator) {
b.Helper()
w := newDBWriter(db)
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
if err := w.Delete(g.Key(i)); err != nil {
b.Fatalf("delete key '%s': %v", string(g.Key(i)), err)
}
@@ -304,7 +304,7 @@ func populate(b *testing.B, db storage.Store) {
func doDeleteChunk(b *testing.B, db storage.ChunkStore, g keyGenerator) {
b.Helper()
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
addr := swarm.MustParseHexAddress(string(g.Key(i)))
if err := db.Delete(context.Background(), addr); err != nil {
b.Fatalf("delete key '%s': %v", string(g.Key(i)), err)
@@ -315,7 +315,7 @@ func doDeleteChunk(b *testing.B, db storage.ChunkStore, g keyGenerator) {
func doWriteChunk(b *testing.B, db storage.Putter, g entryGenerator) {
b.Helper()
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
buf := make([]byte, swarm.HashSize)
if _, err := hex.Decode(buf, g.Key(i)); err != nil {
b.Fatalf("decode value: %v", err)
@@ -331,7 +331,7 @@ func doWriteChunk(b *testing.B, db storage.Putter, g entryGenerator) {
func doReadChunk(b *testing.B, db storage.ChunkStore, g keyGenerator, allowNotFound bool) {
b.Helper()
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
key := string(g.Key(i))
addr := swarm.MustParseHexAddress(key)
_, err := db.Get(context.Background(), addr)
diff --git a/pkg/storage/storagetest/storage.go b/pkg/storage/storagetest/storage.go
index d385d3941fc..9ee41971b43 100644
--- a/pkg/storage/storagetest/storage.go
+++ b/pkg/storage/storagetest/storage.go
@@ -950,7 +950,7 @@ func BenchmarkWriteInBatches(b *testing.B, bs storage.BatchStore) {
g := newSequentialEntryGenerator(b.N)
batch := bs.Batch(context.Background())
resetBenchmark(b)
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
key := g.Key(i)
item := &obj1{
Id: string(key),
@@ -969,7 +969,7 @@ func BenchmarkWriteInFixedSizeBatches(b *testing.B, bs storage.BatchStore) {
g := newSequentialEntryGenerator(b.N)
writer := newBatchDBWriter(bs)
resetBenchmark(b)
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
writer.Put(g.Key(i), g.Value(i))
}
}
@@ -984,7 +984,7 @@ func BenchmarkWriteRandom(b *testing.B, db storage.Store) {
start, step := 0, (b.N+parallelism)/parallelism
n := step * parallelism
g := newFullRandomEntryGenerator(0, n)
- for i := 0; i < parallelism; i++ {
+ for range parallelism {
gens = append(gens, newStartAtEntryGenerator(start, g))
start += step
}
@@ -1021,7 +1021,7 @@ func BenchmarkDeleteInBatches(b *testing.B, bs storage.BatchStore) {
doWrite(b, bs, g)
resetBenchmark(b)
batch := bs.Batch(context.Background())
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
item := &obj1{
Id: string(g.Key(i)),
}
@@ -1039,7 +1039,7 @@ func BenchmarkDeleteInFixedSizeBatches(b *testing.B, bs storage.BatchStore) {
doWrite(b, bs, g)
resetBenchmark(b)
writer := newBatchDBWriter(bs)
- for i := 0; i < b.N; i++ {
+ for i := 0; b.Loop(); i++ {
writer.Delete(g.Key(i))
}
}
diff --git a/pkg/storage/testing/chunk.go b/pkg/storage/testing/chunk.go
index 6c625aceedd..512cf1746c6 100644
--- a/pkg/storage/testing/chunk.go
+++ b/pkg/storage/testing/chunk.go
@@ -76,7 +76,7 @@ func GenerateTestRandomInvalidChunk() swarm.Chunk {
// Chunks by using GenerateTestRandomChunk function.
func GenerateTestRandomChunks(count int) []swarm.Chunk {
chunks := make([]swarm.Chunk, count)
- for i := 0; i < count; i++ {
+ for i := range count {
chunks[i] = GenerateTestRandomChunk()
}
return chunks
diff --git a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go
index a87540d390d..0b3feb31ea6 100644
--- a/pkg/storageincentives/agent.go
+++ b/pkg/storageincentives/agent.go
@@ -46,7 +46,7 @@ type ChainBackend interface {
BlockNumber(context.Context) (uint64, error)
HeaderByNumber(context.Context, *big.Int) (*types.Header, error)
BalanceAt(ctx context.Context, address common.Address, block *big.Int) (*big.Int, error)
- SuggestGasPrice(ctx context.Context) (*big.Int, error)
+ SuggestedFeeAndTip(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error)
}
type Health interface {
@@ -417,7 +417,7 @@ func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) {
}
if !a.state.IsHealthy() {
- a.logger.Info("skipping round because node is unhealhy", "round", round)
+ a.logger.Info("skipping round because node is unhealthy", "round", round)
return false, nil
}
@@ -604,7 +604,7 @@ func (a *Agent) HasEnoughFundsToPlay(ctx context.Context) (*big.Int, bool, error
return nil, false, err
}
- price, err := a.backend.SuggestGasPrice(ctx)
+ price, _, err := a.backend.SuggestedFeeAndTip(ctx, nil, redistribution.BoostTipPercent)
if err != nil {
return nil, false, err
}
diff --git a/pkg/storageincentives/agent_test.go b/pkg/storageincentives/agent_test.go
index 8af6b1d463e..6449ede9059 100644
--- a/pkg/storageincentives/agent_test.go
+++ b/pkg/storageincentives/agent_test.go
@@ -10,6 +10,7 @@ import (
"math/big"
"sync"
"testing"
+ "testing/synctest"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -95,66 +96,66 @@ func TestAgent(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- t.Parallel()
+ synctest.Test(t, func(t *testing.T) {
+ wait := make(chan struct{}, 1)
+ addr := swarm.RandAddress(t)
+
+ backend := &mockchainBackend{
+ limit: tc.limit,
+ limitCallback: func() {
+ wait <- struct{}{}
+ },
+ incrementBy: tc.incrementBy,
+ block: tc.blocksPerRound,
+ balance: tc.balance,
+ }
- wait := make(chan struct{})
- addr := swarm.RandAddress(t)
+ var radius uint8 = 8
- backend := &mockchainBackend{
- limit: tc.limit,
- limitCallback: func() {
- select {
- case wait <- struct{}{}:
- default:
- }
- },
- incrementBy: tc.incrementBy,
- block: tc.blocksPerRound,
- balance: tc.balance,
- }
+ contract := &mockContract{t: t, expectedRadius: radius + tc.doubling}
- var radius uint8 = 8
+ service, _ := createService(t, addr, backend, contract, tc.blocksPerRound, tc.blocksPerPhase, radius, tc.doubling)
+ testutil.CleanupCloser(t, service)
- contract := &mockContract{t: t, expectedRadius: radius + tc.doubling}
+ <-wait
- service, _ := createService(t, addr, backend, contract, tc.blocksPerRound, tc.blocksPerPhase, radius, tc.doubling)
- testutil.CleanupCloser(t, service)
+ synctest.Wait()
- <-wait
+ calls := contract.getCalls()
- if !tc.expectedCalls {
- if len(contract.callsList) > 0 {
- t.Fatal("got unexpected calls")
- } else {
+ if !tc.expectedCalls {
+ if len(calls) > 0 {
+ t.Fatal("got unexpected calls")
+ }
return
}
- }
- assertOrder := func(t *testing.T, want, got contractCall) {
- t.Helper()
- if want != got {
- t.Fatalf("expected call %s, got %s", want, got)
+ if len(calls) == 0 {
+ t.Fatal("expected calls but got none")
}
- }
- contract.mtx.Lock()
- defer contract.mtx.Unlock()
+ assertOrder := func(t *testing.T, want, got contractCall) {
+ t.Helper()
+ if want != got {
+ t.Fatalf("expected call %s, got %s", want, got)
+ }
+ }
- prevCall := contract.callsList[0]
+ prevCall := calls[0]
- for i := 1; i < len(contract.callsList); i++ {
+ for i := 1; i < len(calls); i++ {
+ switch calls[i] {
+ case isWinnerCall:
+ assertOrder(t, revealCall, prevCall)
+ case revealCall:
+ assertOrder(t, commitCall, prevCall)
+ case commitCall:
+ assertOrder(t, isWinnerCall, prevCall)
+ }
- switch contract.callsList[i] {
- case isWinnerCall:
- assertOrder(t, revealCall, prevCall)
- case revealCall:
- assertOrder(t, commitCall, prevCall)
- case commitCall:
- assertOrder(t, isWinnerCall, prevCall)
+ prevCall = calls[i]
}
-
- prevCall = contract.callsList[i]
- }
+ })
})
}
}
@@ -242,8 +243,8 @@ func (m *mockchainBackend) BalanceAt(ctx context.Context, address common.Address
return m.balance, nil
}
-func (m *mockchainBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
- return big.NewInt(4), nil
+func (m *mockchainBackend) SuggestedFeeAndTip(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return big.NewInt(4), big.NewInt(5), nil
}
type contractCall int
@@ -276,6 +277,18 @@ type mockContract struct {
t *testing.T
}
+// getCalls returns a snapshot of the calls list.
+// Even though all goroutines are blocked after synctest.Wait(), we still
+// take the lock as a matter of defensive programming.
+func (m *mockContract) getCalls() []contractCall {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ // return a copy to avoid external modifications
+ calls := make([]contractCall, len(m.callsList))
+ copy(calls, m.callsList)
+ return calls
+}
+
func (m *mockContract) ReserveSalt(context.Context) ([]byte, error) {
return nil, nil
}
diff --git a/pkg/storageincentives/events_test.go b/pkg/storageincentives/events_test.go
index 0ea4547a145..685512ebd73 100644
--- a/pkg/storageincentives/events_test.go
+++ b/pkg/storageincentives/events_test.go
@@ -7,87 +7,88 @@ package storageincentives_test
import (
"context"
"testing"
+ "testing/synctest"
"time"
"github.com/ethersphere/bee/v2/pkg/storageincentives"
)
func TestClose(t *testing.T) {
- t.Parallel()
-
- ev := storageincentives.NewEvents()
-
- done1 := make(chan struct{})
- done2 := make(chan struct{})
- done3 := make(chan struct{})
-
- ev.On(1, func(ctx context.Context) {
- <-ctx.Done()
- close(done1)
- })
-
- ev.On(1, func(ctx context.Context) {
- <-ctx.Done()
- close(done2)
- })
-
- ev.On(2, func(ctx context.Context) {
- <-ctx.Done()
- close(done3)
- })
-
- ev.Publish(1)
- ev.Publish(2)
-
- ev.Close()
-
- for i := 0; i < 3; i++ {
- select {
- case <-done1:
- case <-done2:
- case <-done3:
- case <-time.After(time.Second):
- t.Fatal("timeout")
+ synctest.Test(t, func(t *testing.T) {
+ ev := storageincentives.NewEvents()
+
+ done1 := make(chan struct{})
+ done2 := make(chan struct{})
+ done3 := make(chan struct{})
+
+ ev.On(1, func(ctx context.Context) {
+ <-ctx.Done()
+ close(done1)
+ })
+
+ ev.On(1, func(ctx context.Context) {
+ <-ctx.Done()
+ close(done2)
+ })
+
+ ev.On(2, func(ctx context.Context) {
+ <-ctx.Done()
+ close(done3)
+ })
+
+ ev.Publish(1)
+ ev.Publish(2)
+
+ ev.Close()
+
+ for range 3 {
+ select {
+ case <-done1:
+ case <-done2:
+ case <-done3:
+ case <-time.After(time.Second):
+ t.Fatal("timeout")
+ }
}
- }
+ })
}
func TestPhaseCancel(t *testing.T) {
- t.Parallel()
-
- ev := storageincentives.NewEvents()
-
- done1 := make(chan struct{})
- done2 := make(chan struct{})
- defer ev.Close()
-
- // ensure no panics occur on an empty publish
- ev.Publish(0)
-
- ev.On(1, func(ctx context.Context) {
- <-ctx.Done()
- close(done1)
- })
-
- ev.On(2, func(ctx context.Context) {
- <-ctx.Done()
- close(done2)
- })
-
- ev.On(3, func(ctx context.Context) {
- ev.Cancel(1, 2)
- })
-
- ev.Publish(1)
- ev.Publish(2)
- ev.Publish(3)
-
- for i := 0; i < 2; i++ {
- select {
- case <-done1:
- case <-done2:
- case <-time.After(time.Second):
- t.Fatal("timeout")
+ synctest.Test(t, func(t *testing.T) {
+ ev := storageincentives.NewEvents()
+
+ done1 := make(chan struct{})
+ done2 := make(chan struct{})
+ defer ev.Close()
+
+ // ensure no panics occur on an empty publish
+ ev.Publish(0)
+
+ ev.On(1, func(ctx context.Context) {
+ <-ctx.Done()
+ close(done1)
+ })
+
+ ev.On(2, func(ctx context.Context) {
+ <-ctx.Done()
+ close(done2)
+ })
+
+ ev.On(3, func(ctx context.Context) {
+ ev.Cancel(1, 2)
+ })
+
+ ev.Publish(1)
+ ev.Publish(2)
+ ev.Publish(3)
+
+ for range 2 {
+ select {
+ case <-done1:
+ case <-done2:
+ case <-time.After(time.Second):
+ t.Fatal("timeout")
+ }
}
- }
+ })
}
diff --git a/pkg/storageincentives/proof_test.go b/pkg/storageincentives/proof_test.go
index de7e14f4e13..b0fcc8c8952 100644
--- a/pkg/storageincentives/proof_test.go
+++ b/pkg/storageincentives/proof_test.go
@@ -68,7 +68,7 @@ func TestMakeInclusionProofsRegression(t *testing.T) {
// generate chunks that will be used as sample
sampleChunks := make([]swarm.Chunk, 0, sampleSize)
- for i := 0; i < sampleSize; i++ {
+ for i := range sampleSize {
ch, err := cac.New(fmt.Appendf(nil, "Unstoppable data! Chunk #%d", i+1))
if err != nil {
t.Fatal(err)
diff --git a/pkg/storageincentives/redistribution/redistribution.go b/pkg/storageincentives/redistribution/redistribution.go
index 4657c81cbff..52b2245db75 100644
--- a/pkg/storageincentives/redistribution/redistribution.go
+++ b/pkg/storageincentives/redistribution/redistribution.go
@@ -17,7 +17,10 @@ import (
"github.com/ethersphere/bee/v2/pkg/transaction"
)
-const loggerName = "redistributionContract"
+const (
+ loggerName = "redistributionContract"
+ BoostTipPercent = 50
+)
type Contract interface {
ReserveSalt(context.Context) ([]byte, error)
@@ -117,7 +120,7 @@ func (c *contract) Claim(ctx context.Context, proofs ChunkInclusionProofs) (comm
Value: big.NewInt(0),
Description: "claim win transaction",
}
- txHash, err := c.sendAndWait(ctx, request, transaction.RedistributionTipBoostPercent)
+ txHash, err := c.sendAndWait(ctx, request, BoostTipPercent)
if err != nil {
return txHash, fmt.Errorf("claim: %w", err)
}
@@ -140,7 +143,7 @@ func (c *contract) Commit(ctx context.Context, obfusHash []byte, round uint64) (
Value: big.NewInt(0),
Description: "commit transaction",
}
- txHash, err := c.sendAndWait(ctx, request, transaction.RedistributionTipBoostPercent)
+ txHash, err := c.sendAndWait(ctx, request, BoostTipPercent)
if err != nil {
return txHash, fmt.Errorf("commit: obfusHash %v: %w", common.BytesToHash(obfusHash), err)
}
@@ -163,7 +166,7 @@ func (c *contract) Reveal(ctx context.Context, storageDepth uint8, reserveCommit
Value: big.NewInt(0),
Description: "reveal transaction",
}
- txHash, err := c.sendAndWait(ctx, request, transaction.RedistributionTipBoostPercent)
+ txHash, err := c.sendAndWait(ctx, request, BoostTipPercent)
if err != nil {
return txHash, fmt.Errorf("reveal: storageDepth %d reserveCommitmentHash %v RandomNonce %v: %w", storageDepth, common.BytesToHash(reserveCommitmentHash), common.BytesToHash(RandomNonce), err)
}
diff --git a/pkg/storageincentives/redistributionstate_test.go b/pkg/storageincentives/redistributionstate_test.go
index 063991c31d8..38c3ae8280d 100644
--- a/pkg/storageincentives/redistributionstate_test.go
+++ b/pkg/storageincentives/redistributionstate_test.go
@@ -199,7 +199,7 @@ func TestPurgeRoundData(t *testing.T) {
hasRoundData := make([]bool, roundsCount)
// Populate data at random rounds
- for i := uint64(0); i < roundsCount; i++ {
+ for i := range uint64(roundsCount) {
v := rand.Int()%2 == 0
hasRoundData[i] = v
if v {
@@ -210,7 +210,7 @@ func TestPurgeRoundData(t *testing.T) {
// Run purge successively and assert that all data is purged up to
// currentRound - purgeDataOlderThenXRounds
- for i := uint64(0); i < roundsCount; i++ {
+ for i := range uint64(roundsCount) {
state.SetCurrentEvent(0, i)
state.purgeStaleRoundData()
@@ -229,7 +229,7 @@ func TestPurgeRoundData(t *testing.T) {
state.purgeStaleRoundData()
// One more time assert that everything was purged
- for i := uint64(0); i < roundsCount; i++ {
+ for i := range uint64(roundsCount) {
assertHasDataAtRound(i, false)
}
}
diff --git a/pkg/storageincentives/soc_mine_test.go b/pkg/storageincentives/soc_mine_test.go
index 29a8b5e0898..0265a9a21f7 100644
--- a/pkg/storageincentives/soc_mine_test.go
+++ b/pkg/storageincentives/soc_mine_test.go
@@ -14,6 +14,7 @@ import (
"os"
"sync"
"testing"
+ "testing/synctest"
"github.com/ethersphere/bee/v2/pkg/bmt"
"github.com/ethersphere/bee/v2/pkg/cac"
@@ -32,60 +33,62 @@ import (
// to generate uploads using the input
// cat socs.txt | tail 19 | head 16 | perl -pne 's/([a-f0-9]+)\t([a-f0-9]+)\t([a-f0-9]+)\t([a-f0-9]+)/echo -n $4 | xxd -r -p | curl -X POST \"http:\/\/localhost:1633\/soc\/$1\/$2?sig=$3\" -H \"accept: application\/json, text\/plain, \/\" -H \"content-type: application\/octet-stream\" -H \"swarm-postage-batch-id: 14b26beca257e763609143c6b04c2c487f01a051798c535c2f542ce75a97c05f\" --data-binary \@-/'
func TestSocMine(t *testing.T) {
- t.Parallel()
- // the anchor used in neighbourhood selection and reserve salt for sampling
- prefix, err := hex.DecodeString("3617319a054d772f909f7c479a2cebe5066e836a939412e32403c99029b92eff")
- if err != nil {
- t.Fatal(err)
- }
- // the transformed address hasher factory function
- prefixhasher := func() hash.Hash { return swarm.NewPrefixHasher(prefix) }
- trHasher := func() hash.Hash { return bmt.NewHasher(prefixhasher) }
- // the bignum cast of the maximum sample value (upper bound on transformed addresses as a 256-bit article)
- // this constant is for a minimum reserve size of 2 million chunks with sample size of 16
- // = 1.284401 * 10^71 = 1284401 + 66 0-s
- mstring := "1284401"
- for i := 0; i < 66; i++ {
- mstring = mstring + "0"
- }
- n, ok := new(big.Int).SetString(mstring, 10)
- if !ok {
- t.Fatalf("SetString: error setting to '%s'", mstring)
- }
- // the filter function on the SOC address
- // meant to make sure we pass check for proof of retrievability for
- // a node of overlay 0x65xxx with a reserve depth of 1, i.e.,
- // SOC address must start with zero bit
- filterSOCAddr := func(a swarm.Address) bool {
- return a.Bytes()[0]&0x80 != 0x00
- }
- // the filter function on the transformed address using the density estimation constant
- filterTrAddr := func(a swarm.Address) (bool, error) {
- m := new(big.Int).SetBytes(a.Bytes())
- return m.Cmp(n) < 0, nil
- }
- // setup the signer with a private key from a fixture
- data, err := hex.DecodeString("634fb5a872396d9693e5c9f9d7233cfa93f395c093371017ff44aa9ae6564cdd")
- if err != nil {
- t.Fatal(err)
- }
- privKey, err := crypto.DecodeSecp256k1PrivateKey(data)
- if err != nil {
- t.Fatal(err)
- }
- signer := crypto.NewDefaultSigner(privKey)
+ synctest.Test(t, func(t *testing.T) {
+ // the anchor used in neighbourhood selection and reserve salt for sampling
+ prefix, err := hex.DecodeString("3617319a054d772f909f7c479a2cebe5066e836a939412e32403c99029b92eff")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // the transformed address hasher factory function
+ prefixhasher := func() hash.Hash { return swarm.NewPrefixHasher(prefix) }
+ // Create a pool for efficient hasher reuse
+ trHasherPool := bmt.NewPool(bmt.NewConf(prefixhasher, swarm.BmtBranches, 8))
+ // the bignum cast of the maximum sample value (upper bound on transformed addresses as a 256-bit article)
+ // this constant is for a minimum reserve size of 2 million chunks with sample size of 16
+ // = 1.284401 * 10^71 = 1284401 + 66 0-s
+ mstring := "1284401"
+ for range 66 {
+ mstring = mstring + "0"
+ }
+ n, ok := new(big.Int).SetString(mstring, 10)
+ if !ok {
+ t.Fatalf("SetString: error setting to '%s'", mstring)
+ }
+ // the filter function on the SOC address
+ // meant to make sure we pass check for proof of retrievability for
+ // a node of overlay 0x65xxx with a reserve depth of 1, i.e.,
+ // SOC address must start with zero bit
+ filterSOCAddr := func(a swarm.Address) bool {
+ return a.Bytes()[0]&0x80 != 0x00
+ }
+ // the filter function on the transformed address using the density estimation constant
+ filterTrAddr := func(a swarm.Address) (bool, error) {
+ m := new(big.Int).SetBytes(a.Bytes())
+ return m.Cmp(n) < 0, nil
+ }
+ // setup the signer with a private key from a fixture
+ data, err := hex.DecodeString("634fb5a872396d9693e5c9f9d7233cfa93f395c093371017ff44aa9ae6564cdd")
+ if err != nil {
+ t.Fatal(err)
+ }
+ privKey, err := crypto.DecodeSecp256k1PrivateKey(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ signer := crypto.NewDefaultSigner(privKey)
- sampleSize := 16
- // for sanity check: given a filterSOCAddr requiring a 0 leading bit (chance of 1/2)
- // we expect an overall rough 4 million chunks to be mined to create this sample
- // for 8 workers that is half a million round on average per worker
- err = makeChunks(t, signer, sampleSize, filterSOCAddr, filterTrAddr, trHasher)
- if err != nil {
- t.Fatal(err)
- }
+ sampleSize := 16
+ // for sanity check: given a filterSOCAddr requiring a 0 leading bit (chance of 1/2)
+ // we expect an overall rough 4 million chunks to be mined to create this sample
+ // for 8 workers that is half a million round on average per worker
+ err = makeChunks(t, signer, sampleSize, filterSOCAddr, filterTrAddr, trHasherPool)
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
}
-func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAddr func(swarm.Address) bool, filterTrAddr func(swarm.Address) (bool, error), trHasher func() hash.Hash) error {
+func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAddr func(swarm.Address) bool, filterTrAddr func(swarm.Address) (bool, error), trHasherPool *bmt.Pool) error {
t.Helper()
// set owner address from signer
@@ -110,7 +113,7 @@ func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAdd
// the main loop terminating after sampleSize SOCs have been generated
eg.Go(func() error {
defer cancel()
- for i := 0; i < sampleSize; i++ {
+ for i := range sampleSize {
select {
case sample[i] = <-sampleC:
case <-ectx.Done():
@@ -124,11 +127,13 @@ func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAdd
// loop to start mining workers
count := 8 // number of parallel workers
wg := sync.WaitGroup{}
- for i := 0; i < count; i++ {
+ for i := range count {
wg.Add(1)
eg.Go(func() (err error) {
offset := i * 4
found := 0
+ // Get one hasher per goroutine from the pool to avoid race conditions
+ hasher := trHasherPool.Get()
for seed := uint32(1); ; seed++ {
select {
case <-ectx.Done():
@@ -148,9 +153,9 @@ func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAdd
if !filterSOCAddr(addr) {
continue
}
- hasher := trHasher()
data := s.WrappedChunk().Data()
- hasher.(*bmt.Hasher).SetHeader(data[:8])
+ hasher.Reset()
+ hasher.SetHeader(data[:8])
_, err = hasher.Write(data[8:])
if err != nil {
return err
diff --git a/pkg/storer/cachestore.go b/pkg/storer/cachestore.go
index 6cc51e5550e..9a894a8683c 100644
--- a/pkg/storer/cachestore.go
+++ b/pkg/storer/cachestore.go
@@ -39,10 +39,9 @@ func (db *DB) cacheWorker(ctx context.Context) {
continue
}
- evict := uint64(size - capc)
- if evict < db.reserveOptions.cacheMinEvictCount { // evict at least a min count
- evict = db.reserveOptions.cacheMinEvictCount
- }
+ evict := max(uint64(size-capc),
+ // evict at least a min count
+ db.reserveOptions.cacheMinEvictCount)
dur := captureDuration(time.Now())
err := db.cacheObj.RemoveOldest(ctx, db.storage, evict)
diff --git a/pkg/storer/compact.go b/pkg/storer/compact.go
index 20bf9e027e9..32a3f60dc67 100644
--- a/pkg/storer/compact.go
+++ b/pkg/storer/compact.go
@@ -51,7 +51,7 @@ func Compact(ctx context.Context, basePath string, opts *Options, validate bool)
n := time.Now()
- for shard := 0; shard < sharkyNoOfShards; shard++ {
+ for shard := range sharkyNoOfShards {
select {
case <-ctx.Done():
diff --git a/pkg/storer/compact_test.go b/pkg/storer/compact_test.go
index 3fb58f22377..deb8b7412c4 100644
--- a/pkg/storer/compact_test.go
+++ b/pkg/storer/compact_test.go
@@ -37,14 +37,15 @@ func TestCompact(t *testing.T) {
}
st.StartReserveWorker(ctx, pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))
- var chunks []swarm.Chunk
batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()}
evictBatch := batches[1]
putter := st.ReservePutter()
- for b := 0; b < len(batches); b++ {
- for i := uint64(0); i < 100; i++ {
+ chunks := make([]swarm.Chunk, 0, len(batches)*100)
+
+ for b := range batches {
+ for range uint64(100) {
ch := chunk.GenerateTestRandomChunk()
ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID))
chunks = append(chunks, ch)
@@ -81,7 +82,7 @@ func TestCompact(t *testing.T) {
}
putter = st.ReservePutter()
- for i := uint64(0); i < 100; i++ {
+ for range uint64(100) {
ch := chunk.GenerateTestRandomChunk()
ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[0].ID))
chunks = append(chunks, ch)
@@ -135,13 +136,14 @@ func TestCompactNoEvictions(t *testing.T) {
}
st.StartReserveWorker(ctx, pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))
- var chunks []swarm.Chunk
batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()}
putter := st.ReservePutter()
- for b := 0; b < len(batches); b++ {
- for i := uint64(0); i < 100; i++ {
+ chunks := make([]swarm.Chunk, 0, len(batches)*100)
+
+ for b := range batches {
+ for range uint64(100) {
ch := chunk.GenerateTestRandomChunk()
ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID))
chunks = append(chunks, ch)
@@ -167,7 +169,7 @@ func TestCompactNoEvictions(t *testing.T) {
}
putter = st.ReservePutter()
- for i := uint64(0); i < 100; i++ {
+ for range uint64(100) {
ch := chunk.GenerateTestRandomChunk()
ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[0].ID))
chunks = append(chunks, ch)
diff --git a/pkg/storer/debug_test.go b/pkg/storer/debug_test.go
index 1e8149ba795..8e68b8ac972 100644
--- a/pkg/storer/debug_test.go
+++ b/pkg/storer/debug_test.go
@@ -157,7 +157,7 @@ func testDebugInfo(t *testing.T, newStorer func() (*storer.DB, swarm.Address, er
putter := lstore.ReservePutter()
- for i := 0; i < 10; i++ {
+ for range 10 {
chunk := chunktest.GenerateTestRandomChunkAt(t, addr, 0)
err := putter.Put(context.Background(), chunk)
if err != nil {
diff --git a/pkg/storer/export_test.go b/pkg/storer/export_test.go
index 83c0bd62657..b30b5996edd 100644
--- a/pkg/storer/export_test.go
+++ b/pkg/storer/export_test.go
@@ -22,11 +22,11 @@ func ReplaceSharkyShardLimit(val int) {
}
func (db *DB) WaitForBgCacheWorkers() (unblock func()) {
- for i := 0; i < defaultBgCacheWorkers; i++ {
+ for range defaultBgCacheWorkers {
db.cacheLimiter.sem <- struct{}{}
}
return func() {
- for i := 0; i < defaultBgCacheWorkers; i++ {
+ for range defaultBgCacheWorkers {
<-db.cacheLimiter.sem
}
}
diff --git a/pkg/storer/internal/cache/cache_test.go b/pkg/storer/internal/cache/cache_test.go
index 79536960d94..580263cb6ac 100644
--- a/pkg/storer/internal/cache/cache_test.go
+++ b/pkg/storer/internal/cache/cache_test.go
@@ -211,7 +211,7 @@ func TestCache(t *testing.T) {
})
t.Run("not in chunkstore returns error", func(t *testing.T) {
- for i := 0; i < 5; i++ {
+ for range 5 {
unknownChunk := chunktest.GenerateTestRandomChunk()
_, err := c.Getter(st).Get(context.TODO(), unknownChunk.Address())
if !errors.Is(err, storage.ErrNotFound) {
@@ -223,7 +223,7 @@ func TestCache(t *testing.T) {
t.Run("not in cache doesn't affect state", func(t *testing.T) {
state := c.State(st.IndexStore())
- for i := 0; i < 5; i++ {
+ for range 5 {
extraChunk := chunktest.GenerateTestRandomChunk()
err := st.Run(context.Background(), func(s transaction.Store) error {
return s.ChunkStore().Put(context.TODO(), extraChunk)
diff --git a/pkg/storer/internal/chunkstore/chunkstore_test.go b/pkg/storer/internal/chunkstore/chunkstore_test.go
index 6787cf826ab..970e92df9b8 100644
--- a/pkg/storer/internal/chunkstore/chunkstore_test.go
+++ b/pkg/storer/internal/chunkstore/chunkstore_test.go
@@ -109,7 +109,7 @@ type memFS struct {
}
func (m *memFS) Open(path string) (fs.File, error) {
- return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644)
+ return m.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644)
}
func TestChunkStore(t *testing.T) {
diff --git a/pkg/storer/internal/events/subscribe_test.go b/pkg/storer/internal/events/subscribe_test.go
index 3e99494cd06..f4adefaea2f 100644
--- a/pkg/storer/internal/events/subscribe_test.go
+++ b/pkg/storer/internal/events/subscribe_test.go
@@ -6,54 +6,55 @@ package events_test
import (
"testing"
+ "testing/synctest"
"time"
"github.com/ethersphere/bee/v2/pkg/storer/internal/events"
)
func TestSubscriber(t *testing.T) {
- t.Parallel()
-
- s := events.NewSubscriber()
-
- bin0_1, unsub0_1 := s.Subscribe("0")
- bin0_2, unsub0_2 := s.Subscribe("0")
- t.Cleanup(func() { unsub0_1(); unsub0_2() })
- go s.Trigger("0")
-
- gotSignals := make(chan struct{})
-
- go func() {
- defer close(gotSignals)
- <-bin0_1
- <-bin0_2
- }()
-
- select {
- case <-gotSignals:
- case <-time.After(time.Second):
- t.Fatal("signals did not fire in time")
- }
-
- select {
- case <-bin0_1:
- t.Fatalf("trigger should not have fired again")
- case <-bin0_2:
- t.Fatalf("trigger should not have fired again")
- default:
- }
-
- bin1, unsub1 := s.Subscribe("1")
- go s.Trigger("1")
- go s.Trigger("1")
- <-bin1
- <-bin1
-
- unsub1()
-
- select {
- case <-bin1:
- t.Fatalf("trigger should not have fired again")
- default:
- }
+ synctest.Test(t, func(t *testing.T) {
+ s := events.NewSubscriber()
+
+ bin0_1, unsub0_1 := s.Subscribe("0")
+ bin0_2, unsub0_2 := s.Subscribe("0")
+ t.Cleanup(func() { unsub0_1(); unsub0_2() })
+ go s.Trigger("0")
+
+ gotSignals := make(chan struct{})
+
+ go func() {
+ defer close(gotSignals)
+ <-bin0_1
+ <-bin0_2
+ }()
+
+ select {
+ case <-gotSignals:
+ case <-time.After(time.Second):
+ t.Fatal("signals did not fire in time")
+ }
+
+ select {
+ case <-bin0_1:
+ t.Fatalf("trigger should not have fired again")
+ case <-bin0_2:
+ t.Fatalf("trigger should not have fired again")
+ default:
+ }
+
+ bin1, unsub1 := s.Subscribe("1")
+ go s.Trigger("1")
+ go s.Trigger("1")
+ <-bin1
+ <-bin1
+
+ unsub1()
+
+ select {
+ case <-bin1:
+ t.Fatalf("trigger should not have fired again")
+ default:
+ }
+ })
}
diff --git a/pkg/storer/internal/pinning/pinning.go b/pkg/storer/internal/pinning/pinning.go
index 04689a7eec6..01abe264fc8 100644
--- a/pkg/storer/internal/pinning/pinning.go
+++ b/pkg/storer/internal/pinning/pinning.go
@@ -207,6 +207,32 @@ func HasPin(st storage.Reader, root swarm.Address) (bool, error) {
return has, nil
}
+// GetCollectionUUIDs returns all collection UUIDs from pin collections.
+func GetCollectionUUIDs(st storage.Reader) ([][]byte, error) {
+ var collectionUUIDs [][]byte
+ err := st.Iterate(storage.Query{
+ Factory: func() storage.Item { return &pinCollectionItem{} },
+ }, func(r storage.Result) (bool, error) {
+ collection := r.Entry.(*pinCollectionItem)
+ collectionUUIDs = append(collectionUUIDs, collection.UUID)
+ return false, nil
+ })
+ if err != nil {
+ return nil, fmt.Errorf("pin store: failed getting collections: %w", err)
+ }
+ return collectionUUIDs, nil
+}
+
+// IsChunkPinnedInCollection checks if a chunk address is pinned under the given collection uuid.
+func IsChunkPinnedInCollection(st storage.Reader, chunkAddr swarm.Address, uuid []byte) (bool, error) {
+ chunkItem := &pinChunkItem{UUID: uuid, Addr: chunkAddr}
+ has, err := st.Has(chunkItem)
+ if err != nil {
+ return false, fmt.Errorf("pin store: failed checking chunk pin status: %w", err)
+ }
+ return has, nil
+}
+
// Pins lists all the added pinning collections.
func Pins(st storage.Reader) ([]swarm.Address, error) {
pins := make([]swarm.Address, 0)
diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go
index 80e301e72a8..e90fa27b999 100644
--- a/pkg/storer/internal/reserve/reserve.go
+++ b/pkg/storer/internal/reserve/reserve.go
@@ -19,6 +19,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/postage"
"github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/storer/internal/chunkstamp"
+ pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
"github.com/ethersphere/bee/v2/pkg/storer/internal/stampindex"
"github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
"github.com/ethersphere/bee/v2/pkg/swarm"
@@ -327,6 +328,7 @@ func (r *Reserve) Get(ctx context.Context, addr swarm.Address, batchID []byte, s
}
// EvictBatchBin evicts all chunks from bins upto the bin provided.
+// Pinned chunks are protected from eviction to maintain data integrity.
func (r *Reserve) EvictBatchBin(
ctx context.Context,
batchID []byte,
@@ -336,13 +338,21 @@ func (r *Reserve) EvictBatchBin(
r.multx.Lock(string(batchID))
defer r.multx.Unlock(string(batchID))
- var evicteditems []*BatchRadiusItem
+ var (
+ evictedItems []*BatchRadiusItem
+ pinnedEvictedItems []*BatchRadiusItem
+ )
if count <= 0 {
return 0, nil
}
- err := r.st.IndexStore().Iterate(storage.Query{
+ pinUuids, err := pinstore.GetCollectionUUIDs(r.st.IndexStore())
+ if err != nil {
+ return 0, err
+ }
+
+ err = r.st.IndexStore().Iterate(storage.Query{
Factory: func() storage.Item { return &BatchRadiusItem{} },
Prefix: string(batchID),
}, func(res storage.Result) (bool, error) {
@@ -350,7 +360,24 @@ func (r *Reserve) EvictBatchBin(
if batchRadius.Bin >= bin {
return true, nil
}
- evicteditems = append(evicteditems, batchRadius)
+
+ // Check if the chunk is pinned in any collection
+ pinned := false
+ for _, uuid := range pinUuids {
+ has, err := pinstore.IsChunkPinnedInCollection(r.st.IndexStore(), batchRadius.Address, uuid)
+ if err != nil {
+ return true, err
+ }
+ if has {
+ pinned = true
+ pinnedEvictedItems = append(pinnedEvictedItems, batchRadius)
+ break
+ }
+ }
+
+ if !pinned {
+ evictedItems = append(evictedItems, batchRadius)
+ }
count--
if count == 0 {
return true, nil
@@ -366,7 +393,7 @@ func (r *Reserve) EvictBatchBin(
var evicted atomic.Int64
- for _, item := range evicteditems {
+ for _, item := range evictedItems {
func(item *BatchRadiusItem) {
eg.Go(func() error {
err := r.st.Run(ctx, func(s transaction.Store) error {
@@ -381,6 +408,21 @@ func (r *Reserve) EvictBatchBin(
}(item)
}
+ for _, item := range pinnedEvictedItems {
+ func(item *BatchRadiusItem) {
+ eg.Go(func() error {
+ err := r.st.Run(ctx, func(s transaction.Store) error {
+ return RemoveChunkMetaData(ctx, s, item)
+ })
+ if err != nil {
+ return err
+ }
+ evicted.Add(1)
+ return nil
+ })
+ }(item)
+ }
+
err = eg.Wait()
r.size.Add(-evicted.Load())
@@ -430,6 +472,29 @@ func RemoveChunkWithItem(
)
}
+// RemoveChunkMetaData removes chunk reserve metadata from reserve indexes but keeps the chunks in the chunkstore.
+// Used during pinned-data eviction.
+func RemoveChunkMetaData(
+ ctx context.Context,
+ trx transaction.Store,
+ item *BatchRadiusItem,
+) error {
+ var errs error
+
+ stamp, _ := chunkstamp.LoadWithStampHash(trx.IndexStore(), reserveScope, item.Address, item.StampHash)
+ if stamp != nil {
+ errs = errors.Join(
+ stampindex.Delete(trx.IndexStore(), reserveScope, stamp),
+ chunkstamp.DeleteWithStamp(trx.IndexStore(), reserveScope, item.Address, stamp),
+ )
+ }
+
+ return errors.Join(errs,
+ trx.IndexStore().Delete(item),
+ trx.IndexStore().Delete(&ChunkBinItem{Bin: item.Bin, BinID: item.BinID}),
+ )
+}
+
func (r *Reserve) IterateBin(bin uint8, startBinID uint64, cb func(swarm.Address, uint64, []byte, []byte) (bool, error)) error {
err := r.st.IndexStore().Iterate(storage.Query{
Factory: func() storage.Item { return &ChunkBinItem{} },
@@ -569,7 +634,7 @@ func (r *Reserve) Reset(ctx context.Context) error {
// step 4: delete binItems
err = r.st.Run(context.Background(), func(s transaction.Store) error {
- for i := uint8(0); i < swarm.MaxBins; i++ {
+ for i := range swarm.MaxBins {
err := s.IndexStore().Delete(&BinItem{Bin: i})
if err != nil {
return err
@@ -626,7 +691,7 @@ func (r *Reserve) LastBinIDs() ([]uint64, uint64, error) {
ids := make([]uint64, swarm.MaxBins)
- for bin := uint8(0); bin < swarm.MaxBins; bin++ {
+ for bin := range swarm.MaxBins {
binItem := &BinItem{Bin: bin}
err := r.st.IndexStore().Get(binItem)
if err != nil {
diff --git a/pkg/storer/internal/reserve/reserve_test.go b/pkg/storer/internal/reserve/reserve_test.go
index 95218e992f6..87f700a98c8 100644
--- a/pkg/storer/internal/reserve/reserve_test.go
+++ b/pkg/storer/internal/reserve/reserve_test.go
@@ -11,7 +11,7 @@ import (
"math"
"math/rand"
"testing"
- "time"
+ "testing/synctest"
"github.com/ethersphere/bee/v2/pkg/crypto"
"github.com/ethersphere/bee/v2/pkg/log"
@@ -22,6 +22,7 @@ import (
chunk "github.com/ethersphere/bee/v2/pkg/storage/testing"
"github.com/ethersphere/bee/v2/pkg/storer/internal"
"github.com/ethersphere/bee/v2/pkg/storer/internal/chunkstamp"
+ pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
"github.com/ethersphere/bee/v2/pkg/storer/internal/reserve"
"github.com/ethersphere/bee/v2/pkg/storer/internal/stampindex"
"github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
@@ -47,7 +48,7 @@ func TestReserve(t *testing.T) {
t.Fatal(err)
}
- for b := 0; b < 2; b++ {
+ for b := range 2 {
for i := 1; i < 51; i++ {
ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
err := r.Put(context.Background(), ch)
@@ -100,7 +101,7 @@ func TestReserveChunkType(t *testing.T) {
storedChunksCA := 0
storedChunksSO := 0
- for i := 0; i < 100; i++ {
+ for range 100 {
ch := chunk.GenerateTestRandomChunk()
if rand.Intn(2) == 0 {
storedChunksCA++
@@ -117,11 +118,12 @@ func TestReserveChunkType(t *testing.T) {
Factory: func() storage.Item { return &reserve.ChunkBinItem{} },
}, func(res storage.Result) (bool, error) {
item := res.Entry.(*reserve.ChunkBinItem)
- if item.ChunkType == swarm.ChunkTypeContentAddressed {
+ switch item.ChunkType {
+ case swarm.ChunkTypeContentAddressed:
storedChunksCA--
- } else if item.ChunkType == swarm.ChunkTypeSingleOwner {
+ case swarm.ChunkTypeSingleOwner:
storedChunksSO--
- } else {
+ default:
t.Fatalf("unexpected chunk type: %d", item.ChunkType)
}
return false, nil
@@ -529,77 +531,77 @@ func TestReplaceOldIndex(t *testing.T) {
}
func TestEvict(t *testing.T) {
- t.Parallel()
+ synctest.Test(t, func(t *testing.T) {
+ baseAddr := swarm.RandAddress(t)
- baseAddr := swarm.RandAddress(t)
+ ts := internal.NewInmemStorage()
- ts := internal.NewInmemStorage()
+ chunksPerBatch := 50
+ var chunks []swarm.Chunk
+ batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()}
+ evictBatch := batches[1]
- chunksPerBatch := 50
- var chunks []swarm.Chunk
- batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()}
- evictBatch := batches[1]
+ r, err := reserve.New(
+ baseAddr,
+ ts,
+ 0, kademlia.NewTopologyDriver(),
+ log.Noop,
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
- r, err := reserve.New(
- baseAddr,
- ts,
- 0, kademlia.NewTopologyDriver(),
- log.Noop,
- )
- if err != nil {
- t.Fatal(err)
- }
+ for range chunksPerBatch {
+ for b := range 3 {
+ ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b).WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID))
+ chunks = append(chunks, ch)
+ err := r.Put(context.Background(), ch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
- for i := 0; i < chunksPerBatch; i++ {
- for b := 0; b < 3; b++ {
- ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b).WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID))
- chunks = append(chunks, ch)
- err := r.Put(context.Background(), ch)
+ totalEvicted := 0
+ for i := range 3 {
+ evicted, err := r.EvictBatchBin(context.Background(), evictBatch.ID, math.MaxInt, uint8(i))
if err != nil {
t.Fatal(err)
}
+ totalEvicted += evicted
}
- }
- totalEvicted := 0
- for i := 0; i < 3; i++ {
- evicted, err := r.EvictBatchBin(context.Background(), evictBatch.ID, math.MaxInt, uint8(i))
- if err != nil {
- t.Fatal(err)
+ if totalEvicted != chunksPerBatch {
+ t.Fatalf("got %d, want %d", totalEvicted, chunksPerBatch)
}
- totalEvicted += evicted
- }
-
- if totalEvicted != chunksPerBatch {
- t.Fatalf("got %d, want %d", totalEvicted, chunksPerBatch)
- }
- time.Sleep(time.Second)
+ synctest.Wait()
- for i, ch := range chunks {
- binID := i%chunksPerBatch + 1
- b := swarm.Proximity(baseAddr.Bytes(), ch.Address().Bytes())
- stampHash, err := ch.Stamp().Hash()
- if err != nil {
- t.Fatal(err)
- }
- _, err = r.Get(context.Background(), ch.Address(), ch.Stamp().BatchID(), stampHash)
- if bytes.Equal(ch.Stamp().BatchID(), evictBatch.ID) {
- if !errors.Is(err, storage.ErrNotFound) {
- t.Fatalf("got err %v, want %v", err, storage.ErrNotFound)
- }
- checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, true)
- checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID), StampHash: stampHash}, true)
- checkChunk(t, ts, ch, true)
- } else {
+ for i, ch := range chunks {
+ binID := i%chunksPerBatch + 1
+ b := swarm.Proximity(baseAddr.Bytes(), ch.Address().Bytes())
+ stampHash, err := ch.Stamp().Hash()
if err != nil {
t.Fatal(err)
}
- checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, false)
- checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID), StampHash: stampHash}, false)
- checkChunk(t, ts, ch, false)
+ _, err = r.Get(context.Background(), ch.Address(), ch.Stamp().BatchID(), stampHash)
+ if bytes.Equal(ch.Stamp().BatchID(), evictBatch.ID) {
+ if !errors.Is(err, storage.ErrNotFound) {
+ t.Fatalf("got err %v, want %v", err, storage.ErrNotFound)
+ }
+ checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, true)
+ checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID), StampHash: stampHash}, true)
+ checkChunk(t, ts, ch, true)
+ } else {
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, false)
+ checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID), StampHash: stampHash}, false)
+ checkChunk(t, ts, ch, false)
+ }
}
- }
+ })
}
func TestEvictSOC(t *testing.T) {
@@ -621,9 +623,9 @@ func TestEvictSOC(t *testing.T) {
batch := postagetesting.MustNewBatch()
signer := getSigner(t)
- var chunks []swarm.Chunk
+ chunks := make([]swarm.Chunk, 0, 10)
- for i := 0; i < 10; i++ {
+ for i := range 10 {
ch := soctesting.GenerateMockSocWithSigner(t, []byte{byte(i)}, signer).Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, uint64(i), uint64(i)))
chunks = append(chunks, ch)
err := r.Put(context.Background(), ch)
@@ -692,8 +694,8 @@ func TestEvictMaxCount(t *testing.T) {
batch := postagetesting.MustNewBatch()
- for b := 0; b < 2; b++ {
- for i := 0; i < 10; i++ {
+ for b := range 2 {
+ for range 10 {
ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b).WithStamp(postagetesting.MustNewBatchStamp(batch.ID))
chunks = append(chunks, ch)
err := r.Put(context.Background(), ch)
@@ -748,8 +750,8 @@ func TestIterate(t *testing.T) {
t.Fatal(err)
}
- for b := 0; b < 3; b++ {
- for i := 0; i < 10; i++ {
+ for b := range 3 {
+ for range 10 {
ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
err := r.Put(context.Background(), ch)
if err != nil {
@@ -866,7 +868,7 @@ func TestReset(t *testing.T) {
total = bins * chunksPerBin
)
- for b := 0; b < bins; b++ {
+ for b := range bins {
for i := 1; i <= chunksPerBin; i++ {
ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
err := r.Put(context.Background(), ch)
@@ -966,6 +968,139 @@ func TestReset(t *testing.T) {
}
}
+// TestEvictRemovesPinnedContent checks that pinned chunks are removed from the reserve indexes on eviction but remain retrievable from the chunkstore.
+func TestEvictRemovesPinnedContent(t *testing.T) {
+ t.Parallel()
+
+ const (
+ numChunks = 5
+ numPinnedChunks = 3
+ )
+
+ ctx := context.Background()
+ baseAddr := swarm.RandAddress(t)
+ ts := internal.NewInmemStorage()
+
+ r, err := reserve.New(
+ baseAddr,
+ ts,
+ 0,
+ kademlia.NewTopologyDriver(),
+ log.Noop,
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ batch := postagetesting.MustNewBatch()
+
+ chunks := make([]swarm.Chunk, numChunks)
+ for i := range numChunks {
+ ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewBatchStamp(batch.ID))
+ chunks[i] = ch
+
+ err := r.Put(ctx, ch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ var pinningPutter internal.PutterCloserWithReference
+ err = ts.Run(ctx, func(store transaction.Store) error {
+ pinningPutter, err = pinstore.NewCollection(store.IndexStore())
+ return err
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Add chunks to pin collection
+ pinnedChunks := chunks[:numPinnedChunks]
+ for _, ch := range pinnedChunks {
+ err = ts.Run(ctx, func(s transaction.Store) error {
+ return pinningPutter.Put(ctx, s, ch)
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ err = ts.Run(ctx, func(s transaction.Store) error {
+ return pinningPutter.Close(s.IndexStore(), pinnedChunks[0].Address())
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // evict all chunks from this batch - this should NOT remove pinned chunks
+ evicted, err := r.EvictBatchBin(ctx, batch.ID, numChunks, swarm.MaxBins)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if evicted != numChunks {
+ t.Fatalf("expected %d evicted chunks, got %d", numChunks, evicted)
+ }
+
+ uuids, err := pinstore.GetCollectionUUIDs(ts.IndexStore())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(uuids) != 1 {
+ t.Fatalf("expected exactly one pin collection, but found %d", len(uuids))
+ }
+
+ for i, ch := range chunks {
+ stampHash, err := ch.Stamp().Hash()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Try to get the chunk from reserve, error is checked later
+ _, err = r.Get(ctx, ch.Address(), ch.Stamp().BatchID(), stampHash)
+
+ // Also try to get chunk directly from chunkstore (like bzz/bytes endpoints do)
+ _, chunkStoreErr := ts.ChunkStore().Get(ctx, ch.Address())
+
+ pinned := false
+ for _, uuid := range uuids {
+ has, err := pinstore.IsChunkPinnedInCollection(ts.IndexStore(), ch.Address(), uuid)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if has {
+ pinned = true
+ }
+ }
+
+ if i < len(pinnedChunks) {
+ if pinned {
+ // This chunk is pinned, so it should NOT be accessible from reserve but SHOULD be accessible from chunkstore
+ if !errors.Is(err, storage.ErrNotFound) {
+ t.Errorf("Pinned chunk %s should have been evicted from reserve", ch.Address())
+ }
+ if errors.Is(chunkStoreErr, storage.ErrNotFound) {
+ t.Errorf("Pinned chunk %s was deleted from chunkstore - should remain retrievable!", ch.Address())
+ } else if chunkStoreErr != nil {
+ t.Fatal(chunkStoreErr)
+ }
+ } else {
+ t.Errorf("Chunk %s should be pinned", ch.Address())
+ }
+ } else { // unpinned chunks
+ if !pinned {
+ // Unpinned chunks should be completely evicted (both reserve and chunkstore)
+ if !errors.Is(err, storage.ErrNotFound) {
+ t.Errorf("Unpinned chunk %s should have been evicted from reserve", ch.Address())
+ }
+ if !errors.Is(chunkStoreErr, storage.ErrNotFound) {
+ t.Errorf("Unpinned chunk %s should have been evicted from chunkstore", ch.Address())
+ }
+ } else {
+ t.Errorf("Chunk %s should not be pinned", ch.Address())
+ }
+ }
+ }
+}
+
func checkStore(t *testing.T, s storage.Reader, k storage.Key, gone bool) {
t.Helper()
h, err := s.Has(k)
diff --git a/pkg/storer/internal/stampindex/stampindex.go b/pkg/storer/internal/stampindex/stampindex.go
index b4d70839731..528fd998251 100644
--- a/pkg/storer/internal/stampindex/stampindex.go
+++ b/pkg/storer/internal/stampindex/stampindex.go
@@ -138,7 +138,7 @@ func (i *Item) Clone() storage.Item {
// String implements the fmt.Stringer interface.
func (i Item) String() string {
- return storageutil.JoinFields(i.Namespace(), i.ID())
+ return storageutil.JoinFields(i.Namespace(), fmt.Sprintf("%s/%x/%x", string(i.scope), i.BatchID, i.StampIndex))
}
// LoadOrStore tries to first load a stamp index related record from the store.
diff --git a/pkg/storer/metrics.go b/pkg/storer/metrics.go
index 7b9295ee9f7..68be82ac039 100644
--- a/pkg/storer/metrics.go
+++ b/pkg/storer/metrics.go
@@ -17,22 +17,24 @@ import (
// metrics groups storer related prometheus counters.
type metrics struct {
- MethodCalls *prometheus.CounterVec
- MethodCallsDuration *prometheus.HistogramVec
- ReserveSize prometheus.Gauge
- ReserveSizeWithinRadius prometheus.Gauge
- ReserveCleanup prometheus.Counter
- StorageRadius prometheus.Gauge
- CacheSize prometheus.Gauge
- EvictedChunkCount prometheus.Counter
- ExpiredChunkCount prometheus.Counter
- OverCapTriggerCount prometheus.Counter
- ExpiredBatchCount prometheus.Counter
- LevelDBStats *prometheus.HistogramVec
- ExpiryTriggersCount prometheus.Counter
- ExpiryRunsCount prometheus.Counter
-
- ReserveMissingBatch prometheus.Gauge
+ MethodCalls *prometheus.CounterVec
+ MethodCallsDuration *prometheus.HistogramVec
+ ReserveSize prometheus.Gauge
+ ReserveSizeWithinRadius prometheus.Gauge
+ ReserveCleanup prometheus.Counter
+ StorageRadius prometheus.Gauge
+ CacheSize prometheus.Gauge
+ EvictedChunkCount prometheus.Counter
+ ExpiredChunkCount prometheus.Counter
+ OverCapTriggerCount prometheus.Counter
+ ExpiredBatchCount prometheus.Counter
+ LevelDBStats *prometheus.HistogramVec
+ ExpiryTriggersCount prometheus.Counter
+ ExpiryRunsCount prometheus.Counter
+ ReserveMissingBatch prometheus.Gauge
+ ReserveSampleDuration *prometheus.HistogramVec
+ ReserveSampleRunSummary *prometheus.GaugeVec
+ ReserveSampleLastRunTimestamp prometheus.Gauge
}
// newMetrics is a convenient constructor for creating new metrics.
@@ -163,6 +165,33 @@ func newMetrics() metrics {
Help: "Number of times the expiry worker was fired.",
},
),
+ ReserveSampleDuration: prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: m.Namespace,
+ Subsystem: subsystem,
+ Name: "reserve_sample_duration_seconds",
+ Help: "Duration of ReserveSample operations in seconds.",
+ Buckets: []float64{180, 300, 600, 900, 1200, 1500, 1800},
+ },
+ []string{"status"},
+ ),
+ ReserveSampleRunSummary: prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: m.Namespace,
+ Subsystem: subsystem,
+ Name: "reserve_sample_run_summary",
+ Help: "Summary metrics for the last ReserveSample run.",
+ },
+ []string{"metric"},
+ ),
+ ReserveSampleLastRunTimestamp: prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: m.Namespace,
+ Subsystem: subsystem,
+ Name: "reserve_sample_last_run_timestamp",
+ Help: "Unix timestamp of the last ReserveSample run completion.",
+ },
+ ),
}
}
diff --git a/pkg/storer/migration/refCntSize.go b/pkg/storer/migration/refCntSize.go
index 64f6f58b315..2075c50effb 100644
--- a/pkg/storer/migration/refCntSize.go
+++ b/pkg/storer/migration/refCntSize.go
@@ -124,10 +124,7 @@ func RefCountSizeInc(s storage.BatchStore, logger log.Logger) func() error {
}
for i := 0; i < len(itemsToDelete); i += 10000 {
- end := i + 10000
- if end > len(itemsToDelete) {
- end = len(itemsToDelete)
- }
+ end := min(i+10000, len(itemsToDelete))
b := s.Batch(context.Background())
for _, item := range itemsToDelete[i:end] {
diff --git a/pkg/storer/migration/refCntSize_test.go b/pkg/storer/migration/refCntSize_test.go
index bfbc8c687e7..d5e5424283e 100644
--- a/pkg/storer/migration/refCntSize_test.go
+++ b/pkg/storer/migration/refCntSize_test.go
@@ -24,8 +24,8 @@ func Test_RefCntSize(t *testing.T) {
store := inmemstore.New()
// simulate old cacheEntryItem with some random bytes.
- var oldItems []*localmigration.OldRetrievalIndexItem
- for i := 0; i < 10; i++ {
+ oldItems := make([]*localmigration.OldRetrievalIndexItem, 0, 10)
+ for range 10 {
entry := &localmigration.OldRetrievalIndexItem{
Address: swarm.RandAddress(t),
Timestamp: uint64(rand.Int()),
diff --git a/pkg/storer/migration/reserveRepair.go b/pkg/storer/migration/reserveRepair.go
index ae6838fc1e7..b8378d92550 100644
--- a/pkg/storer/migration/reserveRepair.go
+++ b/pkg/storer/migration/reserveRepair.go
@@ -84,7 +84,7 @@ func ReserveRepairer(
// STEP 1
err = st.Run(context.Background(), func(s transaction.Store) error {
- for i := uint8(0); i < swarm.MaxBins; i++ {
+ for i := range swarm.MaxBins {
err := s.IndexStore().Delete(&reserve.BinItem{Bin: i})
if err != nil {
return err
@@ -117,10 +117,7 @@ func ReserveRepairer(
batchSize := 1000
for i := 0; i < len(chunkBinItems); i += batchSize {
- end := i + batchSize
- if end > len(chunkBinItems) {
- end = len(chunkBinItems)
- }
+ end := min(i+batchSize, len(chunkBinItems))
err := st.Run(context.Background(), func(s transaction.Store) error {
for _, item := range chunkBinItems[i:end] {
err := s.IndexStore().Delete(item)
diff --git a/pkg/storer/migration/reserveRepair_test.go b/pkg/storer/migration/reserveRepair_test.go
index 884664c8787..b60f51769eb 100644
--- a/pkg/storer/migration/reserveRepair_test.go
+++ b/pkg/storer/migration/reserveRepair_test.go
@@ -33,15 +33,15 @@ func TestReserveRepair(t *testing.T) {
var chunksPO = make([][]swarm.Chunk, 5)
var chunksPerPO uint64 = 2
- for i := uint8(0); i < swarm.MaxBins; i++ {
+ for i := range swarm.MaxBins {
err := store.Run(context.Background(), func(s transaction.Store) error {
return s.IndexStore().Put(&reserve.BinItem{Bin: i, BinID: 10})
})
assert.NoError(t, err)
}
- for b := 0; b < 5; b++ {
- for i := uint64(0); i < chunksPerPO; i++ {
+ for b := range 5 {
+ for range chunksPerPO {
ch := chunktest.GenerateTestRandomChunkAt(t, baseAddr, b)
stampHash, err := ch.Stamp().Hash()
if err != nil {
@@ -115,7 +115,7 @@ func TestReserveRepair(t *testing.T) {
)
assert.NoError(t, err)
- for b := 0; b < 5; b++ {
+ for b := range 5 {
if b < 2 {
if _, found := binIDs[uint8(b)]; found {
t.Fatalf("bin %d should not have any binIDs", b)
diff --git a/pkg/storer/migration/step_02_test.go b/pkg/storer/migration/step_02_test.go
index 32cfa9abe4d..c1c741ac2fa 100644
--- a/pkg/storer/migration/step_02_test.go
+++ b/pkg/storer/migration/step_02_test.go
@@ -54,8 +54,8 @@ func Test_Step_02(t *testing.T) {
store := internal.NewInmemStorage()
// simulate old cacheEntryItem with some random bytes.
- var addrs []*testEntry
- for i := 0; i < 10; i++ {
+ addrs := make([]*testEntry, 0, 10)
+ for range 10 {
entry := &testEntry{address: swarm.RandAddress(t)}
addrs = append(addrs, entry)
err := store.Run(context.Background(), func(s transaction.Store) error {
diff --git a/pkg/storer/migration/step_04_test.go b/pkg/storer/migration/step_04_test.go
index 758ddc7987f..fcf54225af4 100644
--- a/pkg/storer/migration/step_04_test.go
+++ b/pkg/storer/migration/step_04_test.go
@@ -84,7 +84,7 @@ func Test_Step_04(t *testing.T) {
_, err = f.Read(buf)
assert.NoError(t, err)
- for i := 0; i < 10; i++ {
+ for i := range 10 {
if i < 2 {
// if the chunk is deleted, the bit is set to 1
assert.Greater(t, buf[i/8]&(1<<(i%8)), byte(0))
diff --git a/pkg/storer/mock/mockreserve.go b/pkg/storer/mock/mockreserve.go
index ba8f13590ef..7ac74ee49ce 100644
--- a/pkg/storer/mock/mockreserve.go
+++ b/pkg/storer/mock/mockreserve.go
@@ -152,6 +152,12 @@ func (s *ReserveStore) CommittedDepth() uint8 {
return s.radius + uint8(s.capacityDoubling)
}
+func (s *ReserveStore) CapacityDoubling() uint8 {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ return uint8(s.capacityDoubling)
+}
+
// IntervalChunks returns a set of chunk in a requested interval.
func (s *ReserveStore) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *storer.BinC, func(), <-chan error) {
s.mtx.Lock()
diff --git a/pkg/storer/mock/mockstorer.go b/pkg/storer/mock/mockstorer.go
index d0b5b7e6ad0..efdce20bbf4 100644
--- a/pkg/storer/mock/mockstorer.go
+++ b/pkg/storer/mock/mockstorer.go
@@ -28,6 +28,9 @@ type mockStorer struct {
activeSessions map[uint64]*storer.SessionInfo
chunkPushC chan *pusher.Op
debugInfo storer.Info
+
+ storageRadius uint8
+ committedDepth uint8
}
type putterSession struct {
@@ -218,9 +221,13 @@ func (m *mockStorer) ChunkStore() storage.ReadOnlyChunkStore {
return m.chunkStore
}
-func (m *mockStorer) StorageRadius() uint8 { return 0 }
+func (m *mockStorer) StorageRadius() uint8 { return m.storageRadius }
+
+func (m *mockStorer) CommittedDepth() uint8 { return m.committedDepth }
-func (m *mockStorer) CommittedDepth() uint8 { return 0 }
+func (m *mockStorer) CapacityDoubling() uint8 {
+ return m.committedDepth - m.storageRadius
+}
func (m *mockStorer) IsWithinStorageRadius(_ swarm.Address) bool { return true }
@@ -235,3 +242,15 @@ func (m *mockStorer) NeighborhoodsStat(ctx context.Context) ([]*storer.Neighborh
func (m *mockStorer) Put(ctx context.Context, ch swarm.Chunk) error {
return m.chunkStore.Put(ctx, ch)
}
+
+func (m *mockStorer) SetStorageRadius(radius uint8) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.storageRadius = radius
+}
+
+func (m *mockStorer) SetCommittedDepth(depth uint8) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.committedDepth = depth
+}
diff --git a/pkg/storer/reserve.go b/pkg/storer/reserve.go
index 329e9461f50..74a3b4286ce 100644
--- a/pkg/storer/reserve.go
+++ b/pkg/storer/reserve.go
@@ -369,10 +369,9 @@ func (db *DB) unreserve(ctx context.Context) (err error) {
default:
}
- evict := target - totalEvicted
- if evict < int(db.reserveOptions.minEvictCount) { // evict at least a min count
- evict = int(db.reserveOptions.minEvictCount)
- }
+ evict := max(target-totalEvicted,
+ // evict at least a min count
+ int(db.reserveOptions.minEvictCount))
binEvicted, err := db.evictBatch(ctx, b, evict, radius)
// eviction happens in batches, so we need to keep track of the total
@@ -428,6 +427,13 @@ func (db *DB) CommittedDepth() uint8 {
return uint8(db.reserveOptions.capacityDoubling) + db.reserve.Radius()
}
+func (db *DB) CapacityDoubling() uint8 {
+ if db.reserve == nil {
+ return 0
+ }
+ return uint8(db.reserveOptions.capacityDoubling)
+}
+
func (db *DB) ReserveSize() int {
if db.reserve == nil {
return 0
@@ -460,9 +466,7 @@ func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan
done := make(chan struct{})
errC := make(chan error, 1)
- db.inFlight.Add(1)
- go func() {
- defer db.inFlight.Done()
+ db.inFlight.Go(func() {
trigger, unsub := db.reserveBinEvents.Subscribe(string(bin))
defer unsub()
@@ -501,7 +505,7 @@ func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan
return
}
}
- }()
+ })
var doneOnce sync.Once
return out, func() {
@@ -549,7 +553,7 @@ func neighborhoodPrefixes(base swarm.Address, radius int, suffixLength int) []sw
bitCombinationsCount := int(math.Pow(2, float64(suffixLength)))
bitSuffixes := make([]uint8, bitCombinationsCount)
- for i := 0; i < bitCombinationsCount; i++ {
+ for i := range bitCombinationsCount {
bitSuffixes[i] = uint8(i)
}
diff --git a/pkg/storer/reserve_test.go b/pkg/storer/reserve_test.go
index c354f92cdbe..3126a45015c 100644
--- a/pkg/storer/reserve_test.go
+++ b/pkg/storer/reserve_test.go
@@ -198,8 +198,8 @@ func TestEvictBatch(t *testing.T) {
putter := st.ReservePutter()
- for b := 0; b < 3; b++ {
- for i := uint64(0); i < chunksPerPO; i++ {
+ for b := range 3 {
+ for range chunksPerPO {
ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID))
chunks = append(chunks, ch)
@@ -290,8 +290,8 @@ func TestUnreserveCap(t *testing.T) {
c, unsub := storer.Events().Subscribe("reserveUnreserved")
defer unsub()
- for b := 0; b < 5; b++ {
- for i := uint64(0); i < chunksPerPO; i++ {
+ for b := range 5 {
+ for range chunksPerPO {
ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batch.ID))
chunksPO[b] = append(chunksPO[b], ch)
@@ -438,8 +438,8 @@ func TestRadiusManager(t *testing.T) {
putter := storer.ReservePutter()
- for i := 0; i < 4; i++ {
- for j := 0; j < 10; j++ {
+ for i := range 4 {
+ for range 10 {
ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, i).WithStamp(postagetesting.MustNewBatchStamp(batch.ID))
err := putter.Put(context.Background(), ch)
if err != nil {
@@ -480,8 +480,8 @@ func TestSubscribeBin(t *testing.T) {
putter = storer.ReservePutter()
)
- for j := 0; j < 2; j++ {
- for i := uint64(0); i < chunksPerPO; i++ {
+ for j := range 2 {
+ for range chunksPerPO {
ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, j)
chunks = append(chunks, ch)
err := putter.Put(context.Background(), ch)
@@ -597,8 +597,8 @@ func TestSubscribeBinTrigger(t *testing.T) {
)
putter := storer.ReservePutter()
- for j := 0; j < 2; j++ {
- for i := uint64(0); i < chunksPerPO; i++ {
+ for j := range 2 {
+ for range chunksPerPO {
ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, j)
chunks = append(chunks, ch)
err := putter.Put(context.Background(), ch)
@@ -694,7 +694,7 @@ func TestNeighborhoodStats(t *testing.T) {
putChunks := func(addr swarm.Address, startingRadius int, st *storer.DB) {
putter := st.ReservePutter()
- for i := 0; i < chunkCountPerPO; i++ {
+ for range chunkCountPerPO {
ch := chunk.GenerateValidRandomChunkAt(t, addr, startingRadius)
err := putter.Put(context.Background(), ch)
if err != nil {
diff --git a/pkg/storer/sample.go b/pkg/storer/sample.go
index e02027ce00b..43999429300 100644
--- a/pkg/storer/sample.go
+++ b/pkg/storer/sample.go
@@ -65,7 +65,6 @@ func (db *DB) ReserveSample(
consensusTime uint64,
minBatchBalance *big.Int,
) (Sample, error) {
-
g, ctx := errgroup.WithContext(ctx)
allStats := &SampleStats{}
@@ -76,8 +75,15 @@ func (db *DB) ReserveSample(
statsLock.Unlock()
}
+ workers := max(4, runtime.NumCPU())
t := time.Now()
+ defer func() {
+ duration := time.Since(t)
+ err := g.Wait()
+ db.recordReserveSampleMetrics(duration, allStats, workers, err)
+ }()
+
excludedBatchIDs, err := db.batchesBelowValue(minBatchBalance)
if err != nil {
db.logger.Error(err, "get batches below value")
@@ -85,7 +91,7 @@ func (db *DB) ReserveSample(
allStats.BatchesBelowValueDuration = time.Since(t)
- chunkC := make(chan *reserve.ChunkBinItem)
+ chunkC := make(chan *reserve.ChunkBinItem, 3*workers)
// Phase 1: Iterate chunk addresses
g.Go(func() error {
@@ -113,16 +119,15 @@ func (db *DB) ReserveSample(
})
// Phase 2: Get the chunk data and calculate transformed hash
- sampleItemChan := make(chan SampleItem)
+ sampleItemChan := make(chan SampleItem, 3*workers)
prefixHasherFactory := func() hash.Hash {
return swarm.NewPrefixHasher(anchor)
}
- workers := max(4, runtime.NumCPU())
db.logger.Debug("reserve sampler workers", "count", workers)
- for i := 0; i < workers; i++ {
+ for range workers {
g.Go(func() error {
wstat := SampleStats{}
hasher := bmt.NewHasher(prefixHasherFactory)
@@ -134,7 +139,6 @@ func (db *DB) ReserveSample(
// exclude chunks who's batches balance are below minimum
if _, found := excludedBatchIDs[string(chItem.BatchID)]; found {
wstat.BelowBalanceIgnored++
-
continue
}
@@ -148,13 +152,15 @@ func (db *DB) ReserveSample(
chunkLoadStart := time.Now()
chunk, err := db.ChunkStore().Get(ctx, chItem.Address)
+ chunkLoadDuration := time.Since(chunkLoadStart)
+
if err != nil {
wstat.ChunkLoadFailed++
db.logger.Debug("failed loading chunk", "chunk_address", chItem.Address, "error", err)
continue
}
- wstat.ChunkLoadDuration += time.Since(chunkLoadStart)
+ wstat.ChunkLoadDuration += chunkLoadDuration
taddrStart := time.Now()
taddr, err := transformedAddress(hasher, chunk, chItem.ChunkType)
@@ -225,8 +231,6 @@ func (db *DB) ReserveSample(
}
if le(item.TransformedAddress, currentMaxAddr) || len(sampleItems) < SampleSize {
- start := time.Now()
-
stamp, err := chunkstamp.LoadWithBatchID(db.storage.IndexStore(), "reserve", item.ChunkAddress, item.Stamp.BatchID())
if err != nil {
stats.StampLoadFailed++
@@ -242,13 +246,15 @@ func (db *DB) ReserveSample(
continue
}
+ stampValidStart := time.Now()
if _, err := db.validStamp(ch); err != nil {
stats.InvalidStamp++
db.logger.Debug("invalid stamp for chunk", "chunk_address", ch.Address(), "error", err)
continue
}
- stats.ValidStampDuration += time.Since(start)
+ stampValidDuration := time.Since(stampValidStart)
+ stats.ValidStampDuration += stampValidDuration
item.Stamp = postage.NewStamp(stamp.BatchID(), stamp.Index(), stamp.Timestamp(), stamp.Sig())
@@ -363,7 +369,6 @@ type SampleStats struct {
func (s *SampleStats) add(other SampleStats) {
s.TotalDuration += other.TotalDuration
- s.TotalIterated += other.TotalIterated
s.IterationDuration += other.IterationDuration
s.SampleInserts += other.SampleInserts
s.NewIgnored += other.NewIgnored
@@ -376,6 +381,7 @@ func (s *SampleStats) add(other SampleStats) {
s.ChunkLoadDuration += other.ChunkLoadDuration
s.ChunkLoadFailed += other.ChunkLoadFailed
s.StampLoadFailed += other.StampLoadFailed
+ s.TotalIterated += other.TotalIterated
}
// RandSample returns Sample with random values.
@@ -383,7 +389,7 @@ func RandSample(t *testing.T, anchor []byte) Sample {
t.Helper()
chunks := make([]swarm.Chunk, SampleSize)
- for i := 0; i < SampleSize; i++ {
+ for i := range SampleSize {
ch := chunk.GenerateTestRandomChunk()
if i%3 == 0 {
ch = chunk.GenerateTestRandomSoChunk(t, ch)
@@ -438,3 +444,30 @@ func getChunkType(chunk swarm.Chunk) swarm.ChunkType {
}
return swarm.ChunkTypeUnspecified
}
+
+func (db *DB) recordReserveSampleMetrics(duration time.Duration, stats *SampleStats, workers int, err error) {
+ status := "success"
+ if err != nil {
+ status = "failure"
+ }
+ db.metrics.ReserveSampleDuration.WithLabelValues(status).Observe(duration.Seconds())
+
+ summaryMetrics := map[string]float64{
+ "duration_seconds": duration.Seconds(),
+ "chunks_iterated": float64(stats.TotalIterated),
+ "chunks_load_failed": float64(stats.ChunkLoadFailed),
+ "stamp_validations": float64(stats.SampleInserts),
+ "invalid_stamps": float64(stats.InvalidStamp),
+ "below_balance_ignored": float64(stats.BelowBalanceIgnored),
+ "workers": float64(workers),
+ "chunks_per_second": float64(stats.TotalIterated) / duration.Seconds(),
+ "stamp_validation_duration_seconds": stats.ValidStampDuration.Seconds(),
+ "batches_below_value_duration_seconds": stats.BatchesBelowValueDuration.Seconds(),
+ }
+
+ for metric, value := range summaryMetrics {
+ db.metrics.ReserveSampleRunSummary.WithLabelValues(metric).Set(value)
+ }
+
+ db.metrics.ReserveSampleLastRunTimestamp.Set(float64(time.Now().Unix()))
+}
diff --git a/pkg/storer/sample_test.go b/pkg/storer/sample_test.go
index fd39882d367..a73ebbcf96e 100644
--- a/pkg/storer/sample_test.go
+++ b/pkg/storer/sample_test.go
@@ -6,10 +6,12 @@ package storer_test
import (
"context"
+ "fmt"
"math/rand"
"testing"
"time"
+ "github.com/ethersphere/bee/v2/pkg/cac"
"github.com/ethersphere/bee/v2/pkg/postage"
postagetesting "github.com/ethersphere/bee/v2/pkg/postage/testing"
@@ -25,8 +27,8 @@ func TestReserveSampler(t *testing.T) {
randChunks := func(baseAddr swarm.Address, timeVar uint64) []swarm.Chunk {
var chs []swarm.Chunk
- for po := 0; po < maxPO; po++ {
- for i := 0; i < chunkCountPerPO; i++ {
+ for po := range maxPO {
+ for range chunkCountPerPO {
ch := chunk.GenerateValidRandomChunkAt(t, baseAddr, po).WithBatch(3, 2, false)
if rand.Intn(2) == 0 { // 50% chance to wrap CAC into SOC
ch = chunk.GenerateTestRandomSoChunk(t, ch)
@@ -112,7 +114,6 @@ func TestReserveSampler(t *testing.T) {
assertSampleNoErrors(t, sample)
})
-
}
t.Run("disk", func(t *testing.T) {
@@ -155,7 +156,7 @@ func TestReserveSamplerSisterNeighborhood(t *testing.T) {
randChunks := func(baseAddr swarm.Address, startingRadius int, timeVar uint64) []swarm.Chunk {
var chs []swarm.Chunk
for po := startingRadius; po < maxPO; po++ {
- for i := 0; i < chunkCountPerPO; i++ {
+ for range chunkCountPerPO {
ch := chunk.GenerateValidRandomChunkAt(t, baseAddr, po).WithBatch(3, 2, false)
if rand.Intn(2) == 0 { // 50% chance to wrap CAC into SOC
ch = chunk.GenerateTestRandomSoChunk(t, ch)
@@ -233,7 +234,6 @@ func TestReserveSamplerSisterNeighborhood(t *testing.T) {
t.Fatalf("sample should not have ignored chunks")
}
})
-
}
t.Run("disk", func(t *testing.T) {
@@ -309,6 +309,63 @@ func assertValidSample(t *testing.T, sample storer.Sample, minRadius uint8, anch
}
}
+// TestSampleVectorCAC is a deterministic test vector that verifies the chunk
+// address and transformed address produced by MakeSampleUsingChunks for a
+// single hardcoded CAC chunk and anchor. It guards against regressions in the
+// BMT hashing or sampling pipeline.
+func TestSampleVectorCAC(t *testing.T) {
+ t.Parallel()
+
+ // Chunk content: 4096 bytes with repeating pattern i%256.
+ chunkContent := make([]byte, swarm.ChunkSize)
+ for i := range chunkContent {
+ chunkContent[i] = byte(i % 256)
+ }
+
+ ch, err := cac.New(chunkContent)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Attach a hardcoded (but otherwise irrelevant) stamp so that
+ // MakeSampleUsingChunks can read ch.Stamp() without panicking.
+ batchID := make([]byte, 32)
+ for i := range batchID {
+ batchID[i] = byte(i + 1)
+ }
+ sig := make([]byte, 65)
+ for i := range sig {
+ sig[i] = byte(i + 1)
+ }
+ ch = ch.WithStamp(postage.NewStamp(batchID, make([]byte, 8), make([]byte, 8), sig))
+
+ // Anchor: exactly 32 bytes, constant across runs.
+ anchor := []byte("swarm-test-anchor-deterministic!")
+
+ sample, err := storer.MakeSampleUsingChunks([]swarm.Chunk{ch}, anchor)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(sample.Items) != 1 {
+ t.Fatalf("expected 1 sample item, got %d", len(sample.Items))
+ }
+
+ item := sample.Items[0]
+
+ const (
+ wantChunkAddr = "902406053a7a2f3a17f16097e1d0b4b6a4abeae6b84968f5503ae621f9522e16"
+ wantTransformedAddr = "9dee91d1ed794460474ffc942996bd713176731db4581a3c6470fe9862905a60"
+ )
+
+ if got := item.ChunkAddress.String(); got != wantChunkAddr {
+ t.Errorf("chunk address mismatch:\n got: %s\n want: %s", got, wantChunkAddr)
+ }
+ if got := item.TransformedAddress.String(); got != wantTransformedAddr {
+ t.Errorf("transformed address mismatch:\n got: %s\n want: %s", got, wantTransformedAddr)
+ }
+}
+
func assertSampleNoErrors(t *testing.T, sample storer.Sample) {
t.Helper()
@@ -325,3 +382,99 @@ func assertSampleNoErrors(t *testing.T, sample storer.Sample) {
t.Fatalf("got unexpected invalid stamps")
}
}
+
+// Benchmark results:
+// goos: linux
+// goarch: amd64
+// pkg: github.com/ethersphere/bee/v2/pkg/storer
+// cpu: Intel(R) Core(TM) Ultra 7 165U
+// BenchmarkCachePutter-14 473118 2149 ns/op 1184 B/op 24 allocs/op
+// BenchmarkReservePutter-14 48109 29760 ns/op 12379 B/op 141 allocs/op
+// BenchmarkReserveSample1k-14 100 12392598 ns/op 9364970 B/op 161383 allocs/op
+// BenchmarkSampleHashing/chunks=1000-14 9 127425952 ns/op 32.14 MB/s 69386109 B/op 814005 allocs/op
+// BenchmarkSampleHashing/chunks=10000-14 1 1241432669 ns/op 32.99 MB/s 693843032 B/op 8140005 allocs/op
+// PASS
+// ok github.com/ethersphere/bee/v2/pkg/storer 34.319s
+
+// BenchmarkReserveSample measures the end-to-end time of the ReserveSample
+// method, including DB iteration, chunk loading, stamp validation, and sample
+// assembly.
+func BenchmarkReserveSample1k(b *testing.B) {
+ const chunkCountPerPO = 100
+ const maxPO = 10
+
+ baseAddr := swarm.RandAddress(b)
+ opts := dbTestOps(baseAddr, 5000, nil, nil, time.Second)
+ opts.ValidStamp = func(ch swarm.Chunk) (swarm.Chunk, error) { return ch, nil }
+
+ st, err := diskStorer(b, opts)()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ timeVar := uint64(time.Now().UnixNano())
+
+ putter := st.ReservePutter()
+ for po := range maxPO {
+ for range chunkCountPerPO {
+ ch := chunk.GenerateValidRandomChunkAt(b, baseAddr, po).WithBatch(3, 2, false)
+ ch = ch.WithStamp(postagetesting.MustNewStampWithTimestamp(timeVar - 1))
+ if err := putter.Put(context.Background(), ch); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+
+ var (
+ radius uint8 = 5
+ anchor = swarm.RandAddressAt(b, baseAddr, int(radius)).Bytes()
+ )
+
+ b.ResetTimer()
+
+ for range b.N {
+ _, err := st.ReserveSample(context.TODO(), anchor, radius, timeVar, nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkSampleHashing measures the time taken by MakeSampleUsingChunks to
+// hash a fixed set of CAC chunks.
+func BenchmarkSampleHashing(b *testing.B) {
+ anchor := []byte("swarm-test-anchor-deterministic!")
+
+ // Shared zero-value stamp: its contents don't affect hash computation.
+ stamp := postage.NewStamp(make([]byte, 32), make([]byte, 8), make([]byte, 8), make([]byte, 65))
+
+ for _, count := range []int{1_000, 10_000} {
+ b.Run(fmt.Sprintf("chunks=%d", count), func(b *testing.B) {
+ // Build chunks once outside the measured loop.
+ // Content is derived deterministically from the chunk index so
+ // that every run produces the same set of chunk addresses.
+ chunks := make([]swarm.Chunk, count)
+ content := make([]byte, swarm.ChunkSize)
+ for i := range chunks {
+ for j := range content {
+ content[j] = byte(i + j)
+ }
+ ch, err := cac.New(content)
+ if err != nil {
+ b.Fatal(err)
+ }
+ chunks[i] = ch.WithStamp(stamp)
+ }
+
+ // Report throughput so the output shows MB/s as well as ns/op.
+ b.SetBytes(int64(count) * swarm.ChunkSize)
+ b.ResetTimer()
+
+ for range b.N {
+ if _, err := storer.MakeSampleUsingChunks(chunks, anchor); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ }
+}
diff --git a/pkg/storer/storer.go b/pkg/storer/storer.go
index cafcb3c6a57..e3318c9b278 100644
--- a/pkg/storer/storer.go
+++ b/pkg/storer/storer.go
@@ -165,6 +165,7 @@ type RadiusChecker interface {
IsWithinStorageRadius(addr swarm.Address) bool
StorageRadius() uint8
CommittedDepth() uint8
+ CapacityDoubling() uint8
}
// LocalStore is a read-only ChunkStore. It can be used to check if chunk is known
@@ -188,7 +189,7 @@ type memFS struct {
}
func (m *memFS) Open(path string) (fs.File, error) {
- return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644)
+ return m.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644)
}
type dirFS struct {
diff --git a/pkg/storer/subscribe_push.go b/pkg/storer/subscribe_push.go
index bd34956b387..237b155b66c 100644
--- a/pkg/storer/subscribe_push.go
+++ b/pkg/storer/subscribe_push.go
@@ -23,9 +23,7 @@ func (db *DB) SubscribePush(ctx context.Context) (<-chan swarm.Chunk, func()) {
stopChanOnce sync.Once
)
- db.subscriptionsWG.Add(1)
- go func() {
- defer db.subscriptionsWG.Done()
+ db.subscriptionsWG.Go(func() {
trigger, unsub := db.events.Subscribe(subscribePushEventKey)
defer unsub()
@@ -80,7 +78,7 @@ func (db *DB) SubscribePush(ctx context.Context) (<-chan swarm.Chunk, func()) {
// wait for the next event
}
}
- }()
+ })
stop := func() {
stopChanOnce.Do(func() {
diff --git a/pkg/storer/uploadstore_test.go b/pkg/storer/uploadstore_test.go
index 095904ade7d..fc53e5281d6 100644
--- a/pkg/storer/uploadstore_test.go
+++ b/pkg/storer/uploadstore_test.go
@@ -196,7 +196,7 @@ func testUploadStore(t *testing.T, newStorer func() (*storer.DB, error)) {
chunks := chunktesting.GenerateTestRandomChunks(10)
for _, ch := range chunks {
- for i := 0; i < 2; i++ {
+ for range 2 {
err := session.Put(context.TODO(), ch)
if err != nil {
t.Fatalf("session.Put(...): unexpected error: %v", err)
@@ -273,7 +273,7 @@ func testListDeleteSessions(t *testing.T, newStorer func() (*storer.DB, error))
t.Fatal(err)
}
- for i := 0; i < 10; i++ {
+ for range 10 {
_, err := lstore.NewSession()
if err != nil {
t.Fatalf("NewSession(): unexpected error: %v", err)
diff --git a/pkg/storer/validate.go b/pkg/storer/validate.go
index 0220c4fb899..01f6904d02c 100644
--- a/pkg/storer/validate.go
+++ b/pkg/storer/validate.go
@@ -153,15 +153,13 @@ func validateWork(logger log.Logger, store storage.Store, readFn func(context.Co
var wg sync.WaitGroup
- for i := 0; i < 8; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
+ for range 8 {
+ wg.Go(func() {
buf := make([]byte, swarm.SocMaxChunkSize)
for item := range iteratateItemsC {
validChunk(item, buf[:item.Location.Length])
}
- }()
+ })
}
count := 0
@@ -331,10 +329,8 @@ func (p *PinIntegrity) Check(ctx context.Context, logger log.Logger, pin string,
iteratateItemsC := make(chan *chunkstore.RetrievalIndexItem)
- for i := 0; i < 8; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
+ for range 8 {
+ wg.Go(func() {
buf := make([]byte, swarm.SocMaxChunkSize)
for item := range iteratateItemsC {
if ctx.Err() != nil {
@@ -344,7 +340,7 @@ func (p *PinIntegrity) Check(ctx context.Context, logger log.Logger, pin string,
invalid.Add(1)
}
}
- }()
+ })
}
var count, micrs int64
diff --git a/pkg/swarm/proximity.go b/pkg/swarm/proximity.go
index d68b9cd4795..43fefa81de2 100644
--- a/pkg/swarm/proximity.go
+++ b/pkg/swarm/proximity.go
@@ -28,7 +28,7 @@ func Proximity(one, other []byte) (ret uint8) {
var m uint8 = 8
for i := uint8(0); i < b; i++ {
oxo := one[i] ^ other[i]
- for j := uint8(0); j < m; j++ {
+ for j := range m {
if (oxo>>(7-j))&0x01 != 0 {
return i*8 + j
}
@@ -48,7 +48,7 @@ func ExtendedProximity(one, other []byte) (ret uint8) {
var m uint8 = 8
for i := uint8(0); i < b; i++ {
oxo := one[i] ^ other[i]
- for j := uint8(0); j < m; j++ {
+ for j := range m {
if (oxo>>(7-j))&0x01 != 0 {
return i*8 + j
}
diff --git a/pkg/swarm/test_helpers.go b/pkg/swarm/test_helpers.go
index b043e1bb17d..e0d614ae0b1 100644
--- a/pkg/swarm/test_helpers.go
+++ b/pkg/swarm/test_helpers.go
@@ -55,7 +55,7 @@ func RandAddresses(tb testing.TB, count int) []Address {
tb.Helper()
result := make([]Address, count)
- for i := 0; i < count; i++ {
+ for i := range count {
result[i] = RandAddress(tb)
}
return result
diff --git a/pkg/swarm/test_helpers_test.go b/pkg/swarm/test_helpers_test.go
index e841fd5c6c5..11d88842681 100644
--- a/pkg/swarm/test_helpers_test.go
+++ b/pkg/swarm/test_helpers_test.go
@@ -29,7 +29,7 @@ func Test_RandAddressAt(t *testing.T) {
hw0 := []byte{b0[0], b0[1], 0, 0} // highest words of base address
hw0int := binary.BigEndian.Uint32(hw0)
- for bitsInCommon := 0; bitsInCommon < 30; bitsInCommon++ {
+ for bitsInCommon := range 30 {
addr := swarm.RandAddressAt(t, base, bitsInCommon)
assertNotZeroAddress(t, addr)
@@ -39,7 +39,7 @@ func Test_RandAddressAt(t *testing.T) {
//bb0 is the bit mask to AND with hw0 and hw1
bb0 := uint32(0)
- for i := 0; i < bitsInCommon; i++ {
+ for i := range bitsInCommon {
bb0 |= (1 << (31 - i))
}
@@ -62,7 +62,7 @@ func Test_RandAddresses(t *testing.T) {
if got := len(addrs); got != count {
t.Fatalf("expected %d, got %d", count, got)
}
- for i := 0; i < count; i++ {
+ for i := range count {
assertNotZeroAddress(t, addrs[i])
}
}
diff --git a/pkg/swarm/utilities_test.go b/pkg/swarm/utilities_test.go
index d3be0cac487..9fbd270513a 100644
--- a/pkg/swarm/utilities_test.go
+++ b/pkg/swarm/utilities_test.go
@@ -204,7 +204,7 @@ func Test_FindStampWithBatchID(t *testing.T) {
func cloneAddresses(addrs []swarm.Address) []swarm.Address {
result := make([]swarm.Address, len(addrs))
- for i := 0; i < len(addrs); i++ {
+ for i := range addrs {
result[i] = addrs[i].Clone()
}
return result
diff --git a/pkg/topology/kademlia/binprefix.go b/pkg/topology/kademlia/binprefix.go
index 94e2550ea2d..4b1b119cffe 100644
--- a/pkg/topology/kademlia/binprefix.go
+++ b/pkg/topology/kademlia/binprefix.go
@@ -17,7 +17,7 @@ func generateCommonBinPrefixes(base swarm.Address, suffixLength int) [][]swarm.A
bitCombinationsCount := int(math.Pow(2, float64(suffixLength)))
bitSuffixes := make([]uint8, bitCombinationsCount)
- for i := 0; i < bitCombinationsCount; i++ {
+ for i := range bitCombinationsCount {
bitSuffixes[i] = uint8(i)
}
diff --git a/pkg/topology/kademlia/internal/metrics/metrics.go b/pkg/topology/kademlia/internal/metrics/metrics.go
index 272dfea6d3f..706271ebcaa 100644
--- a/pkg/topology/kademlia/internal/metrics/metrics.go
+++ b/pkg/topology/kademlia/internal/metrics/metrics.go
@@ -297,7 +297,7 @@ func (c *Collector) Snapshot(t time.Time, addresses ...swarm.Address) map[string
}
if len(addresses) == 0 {
- c.counters.Range(func(key, val interface{}) bool {
+ c.counters.Range(func(key, val any) bool {
cs := val.(*Counters)
snapshot[cs.peerAddress.ByteString()] = cs.snapshot(t)
return true
@@ -381,8 +381,8 @@ func (c *Collector) Inspect(addr swarm.Address) *Snapshot {
// Flush sync the dirty in memory counters for all peers by flushing their
// values to the underlying storage.
func (c *Collector) Flush() error {
- counters := make(map[string]interface{})
- c.counters.Range(func(key, val interface{}) bool {
+ counters := make(map[string]any)
+ c.counters.Range(func(key, val any) bool {
cs := val.(*Counters)
counters[cs.peerAddress.ByteString()] = val
return true
@@ -396,7 +396,7 @@ func (c *Collector) Flush() error {
// Finalize tries to log out all ongoing peer sessions.
func (c *Collector) Finalize(t time.Time, remove bool) error {
- c.counters.Range(func(_, val interface{}) bool {
+ c.counters.Range(func(_, val any) bool {
cs := val.(*Counters)
PeerLogOut(t)(cs)
return true
@@ -407,7 +407,7 @@ func (c *Collector) Finalize(t time.Time, remove bool) error {
}
if remove {
- c.counters.Range(func(_, val interface{}) bool {
+ c.counters.Range(func(_, val any) bool {
cs := val.(*Counters)
c.counters.Delete(cs.peerAddress.ByteString())
return true
diff --git a/pkg/topology/kademlia/kademlia.go b/pkg/topology/kademlia/kademlia.go
index 78d5feb0c40..6dae0c69fa5 100644
--- a/pkg/topology/kademlia/kademlia.go
+++ b/pkg/topology/kademlia/kademlia.go
@@ -36,16 +36,15 @@ import (
const loggerName = "kademlia"
const (
- maxConnAttempts = 1 // when there is maxConnAttempts failed connect calls for a given peer it is considered non-connectable
- maxBootNodeAttempts = 3 // how many attempts to dial to boot-nodes before giving up
- maxNeighborAttempts = 3 // how many attempts to dial to boot-nodes before giving up
+ maxConnAttempts = 4 // when there is maxConnAttempts failed connect calls for a given peer it is considered non-connectable
+ maxBootNodeAttempts = 6 // how many attempts to dial to boot-nodes before giving up
+ maxNeighborAttempts = 6 // how many attempts to dial to neighbor peers before giving up
addPeerBatchSize = 500
- // To avoid context.Timeout errors during network failure, the value of
- // the peerConnectionAttemptTimeout constant must be equal to or greater
- // than 5 seconds (empirically verified).
- peerConnectionAttemptTimeout = 15 * time.Second // timeout for establishing a new connection with peer.
+ // Each underlay address gets up to 15s for connection (in libp2p.Connect).
+ // This budget allows multiple addresses to be tried sequentially per peer.
+ peerConnectionAttemptTimeout = 45 * time.Second // timeout for establishing a new connection with peer.
)
// Default option values
@@ -55,7 +54,7 @@ const (
defaultSaturationPeers = 8
defaultOverSaturationPeers = 18
defaultBootNodeOverSaturationPeers = 20
- defaultShortRetry = 30 * time.Second
+ defaultShortRetry = 10 * time.Second
defaultTimeToRetry = 2 * defaultShortRetry
defaultPruneWakeup = 5 * time.Minute
defaultBroadcastBinSize = 2
@@ -280,7 +279,7 @@ func New(
k.bgBroadcastCtx, k.bgBroadcastCancel = context.WithCancel(context.Background())
- k.metrics.ReachabilityStatus.WithLabelValues(p2p.ReachabilityStatusUnknown.String()).Set(0)
+ k.metrics.ReachabilityStatus.WithLabelValues(p2p.ReachabilityStatusUnknown.String()).Set(1)
return k, nil
}
@@ -429,23 +428,26 @@ func (k *Kad) connectionAttemptsHandler(ctx context.Context, wg *sync.WaitGroup,
}
}
- switch err = k.connect(ctx, peer.addr, bzzAddr.Underlay); {
+ switch err = k.connect(ctx, peer.addr, bzzAddr.Underlays); {
case errors.Is(err, p2p.ErrNetworkUnavailable):
- k.logger.Debug("network unavailable when reaching peer", "peer_overlay_address", peer.addr, "peer_underlay_address", bzzAddr.Underlay)
+ k.logger.Debug("network unavailable when reaching peer", "peer_address", peer.addr, "peer_underlays", bzzAddr.Underlays)
return
case errors.Is(err, errPruneEntry):
- k.logger.Debug("dial to light node", "peer_overlay_address", peer.addr, "peer_underlay_address", bzzAddr.Underlay)
+ k.logger.Debug("dial to light node", "peer_address", peer.addr, "peer_underlays", bzzAddr.Underlays)
remove(peer)
return
case errors.Is(err, errOverlayMismatch):
- k.logger.Debug("overlay mismatch has occurred", "peer_overlay_address", peer.addr, "peer_underlay_address", bzzAddr.Underlay)
+ k.logger.Debug("overlay mismatch has occurred", "peer_address", peer.addr, "peer_underlays", bzzAddr.Underlays)
remove(peer)
return
case errors.Is(err, p2p.ErrPeerBlocklisted):
- k.logger.Debug("peer still in blocklist", "peer_address", bzzAddr)
+ k.logger.Debug("peer still in blocklist", "peer_address", peer.addr, "peer_underlays", bzzAddr.Underlays)
+ return
+ case errors.Is(err, p2p.ErrUnsupportedAddresses):
+ k.logger.Debug("peer has no supported addresses", "peer_address", peer.addr, "peer_underlays", bzzAddr.Underlays)
return
case err != nil:
- k.logger.Debug("peer not reachable from kademlia", "peer_address", bzzAddr, "error", err)
+ k.logger.Debug("peer not reachable from kademlia", "peer_address", peer.addr, "peer_underlays", bzzAddr.Underlays, "error", err)
return
}
@@ -533,9 +535,7 @@ func (k *Kad) manage() {
balanceChan := make(chan *peerConnInfo)
go k.connectionAttemptsHandler(ctx, &wg, neighbourhoodChan, balanceChan)
- k.wg.Add(1)
- go func() {
- defer k.wg.Done()
+ k.wg.Go(func() {
for {
select {
case <-k.halt:
@@ -546,11 +546,9 @@ func (k *Kad) manage() {
k.opt.PruneFunc(k.neighborhoodDepth())
}
}
- }()
+ })
- k.wg.Add(1)
- go func() {
- defer k.wg.Done()
+ k.wg.Go(func() {
for {
select {
case <-k.halt:
@@ -569,12 +567,10 @@ func (k *Kad) manage() {
}
}
}
- }()
+ })
// tell each neighbor about other neighbors periodically
- k.wg.Add(1)
- go func() {
- defer k.wg.Done()
+ k.wg.Go(func() {
for {
select {
case <-k.halt:
@@ -597,7 +593,7 @@ func (k *Kad) manage() {
}
}
}
- }()
+ })
for {
select {
@@ -813,9 +809,6 @@ func (k *Kad) connectBootNodes(ctx context.Context) {
var attempts, connected int
totalAttempts := maxBootNodeAttempts * len(k.opt.Bootnodes)
- ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
- defer cancel()
-
for _, addr := range k.opt.Bootnodes {
if attempts >= totalAttempts || connected >= 3 {
return
@@ -826,18 +819,21 @@ func (k *Kad) connectBootNodes(ctx context.Context) {
if attempts >= maxBootNodeAttempts {
return true, nil
}
- bzzAddress, err := k.p2p.Connect(ctx, addr)
+
+ ctx, cancel := context.WithTimeout(ctx, peerConnectionAttemptTimeout)
+ defer cancel()
+
+ bzzAddress, err := k.p2p.Connect(ctx, []ma.Multiaddr{addr})
attempts++
k.metrics.TotalBootNodesConnectionAttempts.Inc()
if err != nil {
if !errors.Is(err, p2p.ErrAlreadyConnected) {
- k.logger.Debug("connect to bootnode failed", "bootnode_address", addr, "error", err)
- k.logger.Warning("connect to bootnode failed", "bootnode_address", addr)
+ k.logger.Error(err, "connect to bootnode failed", "bootnode_address", addr)
return false, err
}
- k.logger.Debug("connect to bootnode failed", "bootnode_address", addr, "error", err)
+ k.logger.Debug("bootnode already connected", "bootnode_address", addr)
return false, nil
}
@@ -964,7 +960,7 @@ func (k *Kad) recalcDepth() {
// connect connects to a peer and gossips its address to our connected peers,
// as well as sends the peers we are connected to the newly connected peer
-func (k *Kad) connect(ctx context.Context, peer swarm.Address, ma ma.Multiaddr) error {
+func (k *Kad) connect(ctx context.Context, peer swarm.Address, ma []ma.Multiaddr) error {
k.logger.Debug("attempting connect to peer", "peer_address", peer)
ctx, cancel := context.WithTimeout(ctx, peerConnectionAttemptTimeout)
@@ -988,8 +984,10 @@ func (k *Kad) connect(ctx context.Context, peer swarm.Address, ma ma.Multiaddr)
return err
case errors.Is(err, p2p.ErrPeerBlocklisted):
return err
+ case errors.Is(err, p2p.ErrUnsupportedAddresses):
+ return err
case err != nil:
- k.logger.Debug("could not connect to peer", "peer_address", peer, "error", err)
+ k.logger.Info("could not connect to peer", "peer_address", peer, "error", err)
retryTime := time.Now().Add(k.opt.TimeToRetry)
var e *p2p.ConnectionBackoffError
@@ -1041,7 +1039,7 @@ func (k *Kad) Announce(ctx context.Context, peer swarm.Address, fullnode bool) e
isNeighbor := swarm.Proximity(peer.Bytes(), k.base.Bytes()) >= depth
outer:
- for bin := uint8(0); bin < swarm.MaxBins; bin++ {
+ for bin := range swarm.MaxBins {
var (
connectedPeers []swarm.Address
@@ -1121,8 +1119,17 @@ func (k *Kad) AnnounceTo(ctx context.Context, addressee, peer swarm.Address, ful
// This does not guarantee that a connection will immediately
// be made to the peer.
func (k *Kad) AddPeers(addrs ...swarm.Address) {
- k.knownPeers.Add(addrs...)
- k.notifyManageLoop()
+ toAdd := make([]swarm.Address, 0, len(addrs))
+ for _, addr := range addrs {
+ if !addr.Equal(k.base) {
+ toAdd = append(toAdd, addr)
+ }
+ }
+
+ if len(toAdd) > 0 {
+ k.knownPeers.Add(toAdd...)
+ k.notifyManageLoop()
+ }
}
func (k *Kad) Pick(peer p2p.Peer) bool {
@@ -1356,7 +1363,8 @@ func (k *Kad) UpdateReachability(status p2p.ReachabilityStatus) {
}
k.logger.Debug("reachability updated", "reachability", status)
k.reachability = status
- k.metrics.ReachabilityStatus.WithLabelValues(status.String()).Set(0)
+ k.metrics.ReachabilityStatus.Reset()
+ k.metrics.ReachabilityStatus.WithLabelValues(status.String()).Set(1)
}
// UpdateReachability updates node reachability status.
@@ -1589,7 +1597,7 @@ func randomSubset(addrs []swarm.Address, count int) ([]swarm.Address, error) {
return addrs, nil
}
- for i := 0; i < len(addrs); i++ {
+ for i := range addrs {
b, err := random.Int(random.Reader, big.NewInt(int64(len(addrs))))
if err != nil {
return nil, err
diff --git a/pkg/topology/kademlia/kademlia_test.go b/pkg/topology/kademlia/kademlia_test.go
index bca195cdb10..6750ebe2284 100644
--- a/pkg/topology/kademlia/kademlia_test.go
+++ b/pkg/topology/kademlia/kademlia_test.go
@@ -11,6 +11,7 @@ import (
"math"
"math/rand"
"reflect"
+ "strconv"
"sync"
"sync/atomic"
"testing"
@@ -69,7 +70,7 @@ func TestNeighborhoodDepth(t *testing.T) {
testutil.CleanupCloser(t, kad)
// add 2 peers in bin 8
- for i := 0; i < 2; i++ {
+ for range 2 {
addr := swarm.RandAddressAt(t, base, 8)
addOne(t, signer, kad, ab, addr)
@@ -79,9 +80,9 @@ func TestNeighborhoodDepth(t *testing.T) {
// depth is 0
kDepth(t, kad, 0)
- var shallowPeers []swarm.Address
+ shallowPeers := make([]swarm.Address, 0, 2)
// add two first peers (po0,po1)
- for i := 0; i < 2; i++ {
+ for i := range 2 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, signer, kad, ab, addr)
shallowPeers = append(shallowPeers, addr)
@@ -114,8 +115,8 @@ func TestNeighborhoodDepth(t *testing.T) {
// now add peers from bin 0 and expect the depth
// to shift. the depth will be that of the shallowest
// unsaturated bin.
- for i := 0; i < 7; i++ {
- for j := 0; j < 3; j++ {
+ for i := range 7 {
+ for range 3 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, signer, kad, ab, addr)
waitConn(t, &conns)
@@ -148,7 +149,7 @@ func TestNeighborhoodDepth(t *testing.T) {
kDepth(t, kad, 7)
// now fill bin 7 so that it is saturated, expect depth 8
- for i := 0; i < 3; i++ {
+ for range 3 {
addr := swarm.RandAddressAt(t, base, 7)
addOne(t, signer, kad, ab, addr)
waitConn(t, &conns)
@@ -164,7 +165,7 @@ func TestNeighborhoodDepth(t *testing.T) {
var addrs []swarm.Address
// fill the rest up to the bin before last and check that everything works at the edges
for i := 9; i < int(swarm.MaxBins); i++ {
- for j := 0; j < 4; j++ {
+ for range 4 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, signer, kad, ab, addr)
waitConn(t, &conns)
@@ -174,7 +175,7 @@ func TestNeighborhoodDepth(t *testing.T) {
}
// add a whole bunch of peers in the last bin, expect depth to stay at 31
- for i := 0; i < 15; i++ {
+ for range 15 {
addr = swarm.RandAddressAt(t, base, int(swarm.MaxPO))
addOne(t, signer, kad, ab, addr)
}
@@ -187,7 +188,7 @@ func TestNeighborhoodDepth(t *testing.T) {
kDepth(t, kad, 30)
// empty bin 9 and expect depth 9
- for i := 0; i < 4; i++ {
+ for i := range 4 {
removeOne(kad, addrs[i])
}
kDepth(t, kad, 9)
@@ -216,7 +217,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) {
kad.SetStorageRadius(0)
// add 2 peers in bin 8
- for i := 0; i < 2; i++ {
+ for range 2 {
addr := swarm.RandAddressAt(t, base, 8)
addOne(t, signer, kad, ab, addr)
kad.Reachable(addr, p2p.ReachabilityStatusPublic)
@@ -227,9 +228,9 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) {
// depth is 0
kDepth(t, kad, 0)
- var shallowPeers []swarm.Address
+ shallowPeers := make([]swarm.Address, 0, 2)
// add two first peers (po0,po1)
- for i := 0; i < 2; i++ {
+ for i := range 2 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, signer, kad, ab, addr)
kad.Reachable(addr, p2p.ReachabilityStatusPublic)
@@ -262,8 +263,8 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) {
// now add peers from bin 0 and expect the depth
// to shift. the depth will be that of the shallowest
// unsaturated bin.
- for i := 0; i < 7; i++ {
- for j := 0; j < 3; j++ {
+ for i := range 7 {
+ for range 3 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, signer, kad, ab, addr)
kad.Reachable(addr, p2p.ReachabilityStatusPublic)
@@ -292,7 +293,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) {
kDepth(t, kad, 7)
// now fill bin 7 so that it is saturated, expect depth 8
- for i := 0; i < 3; i++ {
+ for range 3 {
addr := swarm.RandAddressAt(t, base, 7)
addOne(t, signer, kad, ab, addr)
kad.Reachable(addr, p2p.ReachabilityStatusPublic)
@@ -310,7 +311,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) {
var addrs []swarm.Address
// fill the rest up to the bin before last and check that everything works at the edges
for i := 9; i < int(swarm.MaxBins); i++ {
- for j := 0; j < 4; j++ {
+ for range 4 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, signer, kad, ab, addr)
kad.Reachable(addr, p2p.ReachabilityStatusPublic)
@@ -321,7 +322,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) {
}
// add a whole bunch of peers in the last bin, expect depth to stay at 31
- for i := 0; i < 15; i++ {
+ for range 15 {
addr = swarm.RandAddressAt(t, base, int(swarm.MaxPO))
addOne(t, signer, kad, ab, addr)
kad.Reachable(addr, p2p.ReachabilityStatusPublic)
@@ -335,7 +336,7 @@ func TestNeighborhoodDepthWithReachability(t *testing.T) {
kDepth(t, kad, 30)
// empty bin 9 and expect depth 9
- for i := 0; i < 4; i++ {
+ for i := range 4 {
removeOne(kad, addrs[i])
}
kDepth(t, kad, 9)
@@ -365,7 +366,7 @@ func TestManage(t *testing.T) {
kad.SetStorageRadius(0)
// first, we add peers to bin 0
- for i := 0; i < saturation; i++ {
+ for range saturation {
addr := swarm.RandAddressAt(t, base, 0)
addOne(t, signer, kad, ab, addr)
}
@@ -373,7 +374,7 @@ func TestManage(t *testing.T) {
waitCounter(t, &conns, int32(saturation))
// next, we add peers to the next bin
- for i := 0; i < saturation; i++ {
+ for range saturation {
addr := swarm.RandAddressAt(t, base, 1)
addOne(t, signer, kad, ab, addr)
}
@@ -383,7 +384,7 @@ func TestManage(t *testing.T) {
kad.SetStorageRadius(1)
// here, we attempt to add to bin 0, but bin is saturated, so no new peers should connect to it
- for i := 0; i < saturation; i++ {
+ for range saturation {
addr := swarm.RandAddressAt(t, base, 0)
addOne(t, signer, kad, ab, addr)
}
@@ -466,8 +467,8 @@ func TestBinSaturation(t *testing.T) {
// add two peers in a few bins to generate some depth >= 0, this will
// make the next iteration result in binSaturated==true, causing no new
// connections to be made
- for i := 0; i < 5; i++ {
- for j := 0; j < 2; j++ {
+ for i := range 5 {
+ for range 2 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, signer, kad, ab, addr)
}
@@ -479,7 +480,7 @@ func TestBinSaturation(t *testing.T) {
// add one more peer in each bin shallower than depth and
// expect no connections due to saturation. if we add a peer within
// depth, the short circuit will be hit and we will connect to the peer
- for i := 0; i < 3; i++ {
+ for i := range 3 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, signer, kad, ab, addr)
}
@@ -514,8 +515,8 @@ func TestOversaturation(t *testing.T) {
testutil.CleanupCloser(t, kad)
// Add maximum accepted number of peers up until bin 5 without problems
- for i := 0; i < 6; i++ {
- for j := 0; j < kademlia.DefaultOverSaturationPeers; j++ {
+ for i := range 6 {
+ for range kademlia.DefaultOverSaturationPeers {
addr := swarm.RandAddressAt(t, base, i)
// if error is not nil as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, addr, nil)
@@ -527,9 +528,9 @@ func TestOversaturation(t *testing.T) {
// see depth is 5
kDepth(t, kad, 5)
- for k := 0; k < 5; k++ {
+ for k := range 5 {
// no further connections can be made
- for l := 0; l < 3; l++ {
+ for range 3 {
addr := swarm.RandAddressAt(t, base, k)
// if error is not as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, addr, topology.ErrOversaturated)
@@ -566,8 +567,8 @@ func TestOversaturationBootnode(t *testing.T) {
testutil.CleanupCloser(t, kad)
// Add maximum accepted number of peers up until bin 5 without problems
- for i := 0; i < 6; i++ {
- for j := 0; j < overSaturationPeers; j++ {
+ for i := range 6 {
+ for range overSaturationPeers {
addr := swarm.RandAddressAt(t, base, i)
// if error is not nil as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, addr, nil)
@@ -579,9 +580,9 @@ func TestOversaturationBootnode(t *testing.T) {
// see depth is 5
kDepth(t, kad, 5)
- for k := 0; k < 5; k++ {
+ for k := range 5 {
// further connections should succeed outside of depth
- for l := 0; l < 3; l++ {
+ for range 3 {
addr := swarm.RandAddressAt(t, base, k)
// if error is not as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, addr, nil)
@@ -595,7 +596,7 @@ func TestOversaturationBootnode(t *testing.T) {
}
// see we can still add / not limiting more peers in neighborhood depth
- for m := 0; m < 12; m++ {
+ for range 12 {
addr := swarm.RandAddressAt(t, base, 5)
// if error is not nil as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, addr, nil)
@@ -624,8 +625,8 @@ func TestBootnodeMaxConnections(t *testing.T) {
testutil.CleanupCloser(t, kad)
// Add maximum accepted number of peers up until bin 5 without problems
- for i := 0; i < 6; i++ {
- for j := 0; j < bootnodeOverSaturationPeers; j++ {
+ for i := range 6 {
+ for range bootnodeOverSaturationPeers {
addr := swarm.RandAddressAt(t, base, i)
// if error is not nil as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, addr, nil)
@@ -640,9 +641,9 @@ func TestBootnodeMaxConnections(t *testing.T) {
depth := 5
outSideDepthPeers := 5
- for k := 0; k < depth; k++ {
+ for k := range depth {
// further connections should succeed outside of depth
- for l := 0; l < outSideDepthPeers; l++ {
+ for range outSideDepthPeers {
addr := swarm.RandAddressAt(t, base, k)
// if error is not as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, addr, nil)
@@ -825,7 +826,7 @@ func TestAddressBookPrune(t *testing.T) {
}
testutil.CleanupCloser(t, kad)
- nonConnPeer, err := bzz.NewAddress(signer, nonConnectableAddress, swarm.RandAddressAt(t, base, 1), 0, nil)
+ nonConnPeer, err := bzz.NewAddress(signer, []ma.Multiaddr{nonConnectableAddress}, swarm.RandAddressAt(t, base, 1), 0, nil)
if err != nil {
t.Fatal(err)
}
@@ -836,11 +837,12 @@ func TestAddressBookPrune(t *testing.T) {
// add non connectable peer, check connection and failed connection counters
kad.AddPeers(nonConnPeer.Overlay)
- kad.Trigger()
- kad.Trigger()
+ for range 5 {
+ kad.Trigger()
+ }
waitCounter(t, &conns, 0)
- waitCounter(t, &failedConns, 3)
+ waitCounter(t, &failedConns, 6)
_, err = ab.Get(nonConnPeer.Overlay)
if !errors.Is(err, addressbook.ErrNotFound) {
@@ -903,7 +905,7 @@ func TestAddressBookQuickPrune_FLAKY(t *testing.T) {
time.Sleep(100 * time.Millisecond)
- nonConnPeer, err := bzz.NewAddress(signer, nonConnectableAddress, swarm.RandAddressAt(t, base, 1), 0, nil)
+ nonConnPeer, err := bzz.NewAddress(signer, []ma.Multiaddr{nonConnectableAddress}, swarm.RandAddressAt(t, base, 1), 0, nil)
if err != nil {
t.Fatal(err)
}
@@ -922,6 +924,13 @@ func TestAddressBookQuickPrune_FLAKY(t *testing.T) {
waitCounter(t, &conns, 0)
waitCounter(t, &failedConns, 1)
+ // trigger the remaining attempts so the peer accrues maxConnAttempts failed connections
+ for range 3 {
+ time.Sleep(10 * time.Millisecond)
+ kad.Trigger()
+ waitCounter(t, &failedConns, 1)
+ }
+
_, err = ab.Get(nonConnPeer.Overlay)
if !errors.Is(err, addressbook.ErrNotFound) {
t.Fatal(err)
@@ -1109,7 +1118,7 @@ func TestKademlia_SubscribeTopologyChange(t *testing.T) {
c2, u2 := kad.SubscribeTopologyChange()
defer u2()
- for i := 0; i < 4; i++ {
+ for i := range 4 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, sg, kad, ab, addr)
}
@@ -1129,14 +1138,14 @@ func TestKademlia_SubscribeTopologyChange(t *testing.T) {
c, u := kad.SubscribeTopologyChange()
defer u()
- for i := 0; i < 4; i++ {
+ for i := range 4 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, sg, kad, ab, addr)
}
testSignal(t, c)
- for i := 0; i < 4; i++ {
+ for i := range 4 {
addr := swarm.RandAddressAt(t, base, i)
addOne(t, sg, kad, ab, addr)
}
@@ -1210,9 +1219,9 @@ func getBinPopulation(bins *topology.KadBins, po uint8) uint64 {
func TestStart(t *testing.T) {
t.Parallel()
- var bootnodes []ma.Multiaddr
- var bootnodesOverlays []swarm.Address
- for i := 0; i < 10; i++ {
+ bootnodes := make([]ma.Multiaddr, 0, 10)
+ bootnodesOverlays := make([]swarm.Address, 0, 10)
+ for range 10 {
overlay := swarm.RandAddress(t)
multiaddr, err := ma.NewMultiaddr(underlayBase + overlay.String())
@@ -1231,13 +1240,13 @@ func TestStart(t *testing.T) {
var conns, failedConns int32 // how many connect calls were made to the p2p mock
_, kad, ab, _, signer := newTestKademlia(t, &conns, &failedConns, kademlia.Options{Bootnodes: bootnodes})
- for i := 0; i < 3; i++ {
+ for range 3 {
peer := swarm.RandAddress(t)
multiaddr, err := ma.NewMultiaddr(underlayBase + peer.String())
if err != nil {
t.Fatal(err)
}
- bzzAddr, err := bzz.NewAddress(signer, multiaddr, peer, 0, nil)
+ bzzAddr, err := bzz.NewAddress(signer, []ma.Multiaddr{multiaddr}, peer, 0, nil)
if err != nil {
t.Fatal(err)
}
@@ -1324,7 +1333,7 @@ func TestOutofDepthPrune(t *testing.T) {
testutil.CleanupCloser(t, kad)
// bin 0,1 balanced, rest not
- for i := 0; i < 6; i++ {
+ for i := range 6 {
var peers []swarm.Address
if i < 2 {
peers = mineBin(t, base, i, 20, true)
@@ -1350,7 +1359,7 @@ func TestOutofDepthPrune(t *testing.T) {
// check that no pruning has happened
bins := binSizes(kad)
- for i := 0; i < 6; i++ {
+ for i := range 6 {
if bins[i] <= overSaturationPeers {
t.Fatalf("bin %d, got %d, want more than %d", i, bins[i], overSaturationPeers)
}
@@ -1375,7 +1384,7 @@ func TestOutofDepthPrune(t *testing.T) {
// check bins have been pruned
bins = binSizes(kad)
- for i := uint8(0); i < 5; i++ {
+ for i := range uint8(5) {
if bins[i] != overSaturationPeers {
t.Fatalf("bin %d, got %d, want %d", i, bins[i], overSaturationPeers)
}
@@ -1426,7 +1435,7 @@ func TestPruneExcludeOps(t *testing.T) {
testutil.CleanupCloser(t, kad)
// bin 0,1 balanced, rest not
- for i := 0; i < 6; i++ {
+ for i := range 6 {
var peers []swarm.Address
if i < 2 {
peers = mineBin(t, base, i, perBin, true)
@@ -1441,7 +1450,7 @@ func TestPruneExcludeOps(t *testing.T) {
kad.Reachable(peers[i], p2p.ReachabilityStatusPublic)
}
}
- for i := 0; i < 4; i++ {
+ for range 4 {
}
time.Sleep(time.Millisecond * 10)
kDepth(t, kad, i)
@@ -1459,7 +1468,7 @@ func TestPruneExcludeOps(t *testing.T) {
// check that no pruning has happened
bins := binSizes(kad)
- for i := 0; i < 6; i++ {
+ for i := range 6 {
if bins[i] <= overSaturationPeers {
t.Fatalf("bin %d, got %d, want more than %d", i, bins[i], overSaturationPeers)
}
@@ -1484,7 +1493,7 @@ func TestPruneExcludeOps(t *testing.T) {
// check bins have NOT been pruned because the peer count func excluded unreachable peers
bins = binSizes(kad)
- for i := uint8(0); i < 5; i++ {
+ for i := range uint8(5) {
if bins[i] != perBin {
t.Fatalf("bin %d, got %d, want %d", i, bins[i], perBin)
}
@@ -1501,7 +1510,7 @@ func TestBootnodeProtectedNodes(t *testing.T) {
// create base and protected nodes addresses
base := swarm.RandAddress(t)
protected := make([]swarm.Address, 6)
- for i := 0; i < 6; i++ {
+ for i := range 6 {
addr := swarm.RandAddressAt(t, base, i)
protected[i] = addr
}
@@ -1526,8 +1535,8 @@ func TestBootnodeProtectedNodes(t *testing.T) {
testutil.CleanupCloser(t, kad)
// Add maximum accepted number of peers up until bin 5 without problems
- for i := 0; i < 6; i++ {
- for j := 0; j < overSaturationPeers; j++ {
+ for i := range 6 {
+ for range overSaturationPeers {
// if error is not nil as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, protected[i], nil)
}
@@ -1538,7 +1547,7 @@ func TestBootnodeProtectedNodes(t *testing.T) {
// see depth is 5
kDepth(t, kad, 5)
- for k := 0; k < 5; k++ {
+ for k := range 5 {
// further connections should succeed outside of depth
addr := swarm.RandAddressAt(t, base, k)
// if error is not as specified, connectOne goes fatal
@@ -1549,14 +1558,14 @@ func TestBootnodeProtectedNodes(t *testing.T) {
// ensure protected node was not kicked out and we have more than oversaturation
// amount
sizes := binSizes(kad)
- for k := 0; k < 5; k++ {
+ for k := range 5 {
if sizes[k] != 2 {
t.Fatalf("invalid bin size expected 2 found %d", sizes[k])
}
}
- for k := 0; k < 5; k++ {
+ for k := range 5 {
// further connections should succeed outside of depth
- for l := 0; l < 3; l++ {
+ for range 3 {
addr := swarm.RandAddressAt(t, base, k)
// if error is not as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, addr, nil)
@@ -1567,7 +1576,7 @@ func TestBootnodeProtectedNodes(t *testing.T) {
// ensure unprotected nodes are kicked out to make room for new peers and protected
// nodes are still present
sizes = binSizes(kad)
- for k := 0; k < 5; k++ {
+ for k := range 5 {
if sizes[k] != 2 {
t.Fatalf("invalid bin size expected 2 found %d", sizes[k])
}
@@ -1691,8 +1700,8 @@ func TestAnnounceNeighborhoodToNeighbor(t *testing.T) {
testutil.CleanupCloser(t, kad)
// add some peers
- for bin := 0; bin < 2; bin++ {
- for i := 0; i < 4; i++ {
+ for bin := range 2 {
+ for range 4 {
addr := swarm.RandAddressAt(t, base, bin)
addOne(t, signer, kad, ab, addr)
waitCounter(t, &conns, 1)
@@ -1703,7 +1712,7 @@ func TestAnnounceNeighborhoodToNeighbor(t *testing.T) {
kDepth(t, kad, 1)
// add many more neighbors
- for i := 0; i < 10; i++ {
+ for range 10 {
addr := swarm.RandAddressAt(t, base, 2)
addOne(t, signer, kad, ab, addr)
waitCounter(t, &conns, 1)
@@ -1740,8 +1749,8 @@ func TestIteratorOpts(t *testing.T) {
}
testutil.CleanupCloser(t, kad)
- for i := 0; i < 6; i++ {
- for j := 0; j < 4; j++ {
+ for i := range 6 {
+ for range 4 {
addr := swarm.RandAddressAt(t, base, i)
// if error is not nil as specified, connectOne goes fatal
connectOne(t, signer, kad, ab, addr, nil)
@@ -1903,7 +1912,7 @@ func mineBin(t *testing.T, base swarm.Address, bin, count int, isBalanced bool)
t.Fatal("peersCount must be greater than 8 for balanced bins")
}
- for i := 0; i < count; i++ {
+ for i := range count {
rndAddrs[i] = swarm.RandAddressAt(t, base, bin)
}
@@ -2026,11 +2035,14 @@ func p2pMock(t *testing.T, ab addressbook.Interface, signer beeCrypto.Signer, co
t.Helper()
p2ps := p2pmock.New(
- p2pmock.WithConnectFunc(func(ctx context.Context, addr ma.Multiaddr) (*bzz.Address, error) {
- if addr.Equal(nonConnectableAddress) {
- _ = atomic.AddInt32(failedCounter, 1)
- return nil, errors.New("non reachable node")
+ p2pmock.WithConnectFunc(func(ctx context.Context, addrs []ma.Multiaddr) (*bzz.Address, error) {
+ for _, addr := range addrs {
+ if addr.Equal(nonConnectableAddress) {
+ _ = atomic.AddInt32(failedCounter, 1)
+ return nil, errors.New("non reachable node")
+ }
}
+
if counter != nil {
_ = atomic.AddInt32(counter, 1)
}
@@ -2041,13 +2053,13 @@ func p2pMock(t *testing.T, ab addressbook.Interface, signer beeCrypto.Signer, co
}
for _, a := range addresses {
- if a.Underlay.Equal(addr) {
+ if bzz.AreUnderlaysEqual(a.Underlays, addrs) {
return &a, nil
}
}
address := swarm.RandAddress(t)
- bzzAddr, err := bzz.NewAddress(signer, addr, address, 0, nil)
+ bzzAddr, err := bzz.NewAddress(signer, addrs, address, 0, nil)
if err != nil {
return nil, err
}
@@ -2077,12 +2089,8 @@ const underlayBase = "/ip4/127.0.0.1/tcp/1634/dns/"
func connectOne(t *testing.T, signer beeCrypto.Signer, k *kademlia.Kad, ab addressbook.Putter, peer swarm.Address, expErr error) {
t.Helper()
- multiaddr, err := ma.NewMultiaddr(underlayBase + peer.String())
- if err != nil {
- t.Fatal(err)
- }
-
- bzzAddr, err := bzz.NewAddress(signer, multiaddr, peer, 0, nil)
+ underlays := generateMultipleUnderlays(t, 3, underlayBase+peer.String())
+ bzzAddr, err := bzz.NewAddress(signer, underlays, peer, 0, nil)
if err != nil {
t.Fatal(err)
}
@@ -2102,7 +2110,7 @@ func addOne(t *testing.T, signer beeCrypto.Signer, k *kademlia.Kad, ab addressbo
if err != nil {
t.Fatal(err)
}
- bzzAddr, err := bzz.NewAddress(signer, multiaddr, peer, 0, nil)
+ bzzAddr, err := bzz.NewAddress(signer, []ma.Multiaddr{multiaddr}, peer, 0, nil)
if err != nil {
t.Fatal(err)
}
@@ -2231,3 +2239,86 @@ func ptrInt(v int) *int {
func ptrDuration(v time.Duration) *time.Duration {
return &v
}
+
+func generateMultipleUnderlays(t *testing.T, n int, baseUnderlay string) []ma.Multiaddr {
+ t.Helper()
+ underlays := make([]ma.Multiaddr, n)
+
+ for i := 0; i < n; i++ {
+ multiaddr, err := ma.NewMultiaddr(baseUnderlay + strconv.Itoa(i))
+ if err != nil {
+ t.Fatal(err)
+ }
+ underlays[i] = multiaddr
+ }
+ return underlays
+}
+
+// TestAddPeersSkipsSelf verifies that AddPeers does not add self address
+// to the known peers list, preventing self-connection attempts.
+func TestAddPeersSkipsSelf(t *testing.T) {
+ t.Parallel()
+
+ var (
+ conns int32
+ base, kad, ab, _, signer = newTestKademlia(t, &conns, nil, kademlia.Options{
+ ExcludeFunc: defaultExcludeFunc,
+ })
+ )
+
+ if err := kad.Start(context.Background()); err != nil {
+ t.Fatal(err)
+ }
+ testutil.CleanupCloser(t, kad)
+
+ // Add some regular peers first
+ peer1 := swarm.RandAddressAt(t, base, 1)
+ peer2 := swarm.RandAddressAt(t, base, 2)
+
+ addOne(t, signer, kad, ab, peer1)
+ addOne(t, signer, kad, ab, peer2)
+
+ waitCounter(t, &conns, 2)
+
+ // Now try to add self address along with another peer
+ peer3 := swarm.RandAddressAt(t, base, 3)
+
+ // Prepare address for peer3
+ multiaddr, err := ma.NewMultiaddr(underlayBase + peer3.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ bzzAddr, err := bzz.NewAddress(signer, []ma.Multiaddr{multiaddr}, peer3, 0, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := ab.Put(peer3, *bzzAddr); err != nil {
+ t.Fatal(err)
+ }
+
+ // Try to add self (base) and peer3 together
+ kad.AddPeers(base, peer3)
+
+ // Only peer3 should result in a connection attempt, not base
+ waitCounter(t, &conns, 1)
+
+ // Verify we have exactly 3 connected peers (peer1, peer2, peer3), not 4
+ waitPeers(t, kad, 3)
+
+ // Verify base is not in the connected peers list
+ foundSelf := false
+ err = kad.EachConnectedPeer(func(addr swarm.Address, _ uint8) (bool, bool, error) {
+ if addr.Equal(base) {
+ foundSelf = true
+ return true, false, nil
+ }
+ return false, false, nil
+ }, topology.Select{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if foundSelf {
+ t.Fatal("self address should not be in connected peers")
+ }
+}
diff --git a/pkg/topology/pslice/pslice_test.go b/pkg/topology/pslice/pslice_test.go
index ee1c26ebaaf..07eadaafdd8 100644
--- a/pkg/topology/pslice/pslice_test.go
+++ b/pkg/topology/pslice/pslice_test.go
@@ -24,8 +24,8 @@ func TestShallowestEmpty(t *testing.T) {
peers = make([][]swarm.Address, 16)
)
- for i := 0; i < 16; i++ {
- for j := 0; j < 3; j++ {
+ for i := range 16 {
+ for range 3 {
a := swarm.RandAddressAt(t, base, i)
peers[i] = append(peers[i], a)
}
@@ -221,7 +221,7 @@ func TestIterators(t *testing.T) {
ps := pslice.New(4, base)
peers := make([]swarm.Address, 4)
- for i := 0; i < 4; i++ {
+ for i := range 4 {
peers[i] = swarm.RandAddressAt(t, base, i)
}
@@ -286,7 +286,7 @@ func TestBinPeers(t *testing.T) {
// prepare slice
ps := pslice.New(len(tc.peersCount), base)
for bin, peersCount := range tc.peersCount {
- for i := 0; i < peersCount; i++ {
+ for range peersCount {
peer := swarm.RandAddressAt(t, base, bin)
binPeers[bin] = append(binPeers[bin], peer)
ps.Add(peer)
@@ -342,8 +342,8 @@ func TestIteratorsJumpStop(t *testing.T) {
ps := pslice.New(4, base)
peers := make([]swarm.Address, 0, 12)
- for i := 0; i < 4; i++ {
- for ii := 0; ii < 3; ii++ {
+ for i := range 4 {
+ for range 3 {
a := swarm.RandAddressAt(t, base, i)
peers = append(peers, a)
ps.Add(a)
@@ -435,9 +435,7 @@ func BenchmarkAdd(b *testing.B) {
addrs := swarm.RandAddresses(b, bins*perBin)
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
+ for b.Loop() {
for _, addr := range addrs {
ps.Add(addr)
}
@@ -450,9 +448,7 @@ func BenchmarkAddBatch(b *testing.B) {
addrs := swarm.RandAddresses(b, bins*perBin)
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
+ for b.Loop() {
ps.Add(addrs...)
}
}
@@ -464,9 +460,7 @@ func BenchmarkRemove(b *testing.B) {
addrs := swarm.RandAddresses(b, bins*perBin)
ps.Add(addrs...)
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
+ for b.Loop() {
for _, addr := range addrs {
ps.Remove(addr)
}
@@ -480,9 +474,7 @@ func BenchmarkEachBin(b *testing.B) {
addrs := swarm.RandAddresses(b, bins*perBin)
ps.Add(addrs...)
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
+ for b.Loop() {
_ = ps.EachBin(func(a swarm.Address, u uint8) (stop bool, jumpToNext bool, err error) {
return false, false, nil
})
@@ -496,9 +488,7 @@ func BenchmarkEachBinRev(b *testing.B) {
addrs := swarm.RandAddresses(b, bins*perBin)
ps.Add(addrs...)
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
+ for b.Loop() {
_ = ps.EachBinRev(func(a swarm.Address, u uint8) (stop bool, jumpToNext bool, err error) {
return false, false, nil
})
diff --git a/pkg/tracing/tracing_test.go b/pkg/tracing/tracing_test.go
index 9c0baa2f685..c6acb330b18 100644
--- a/pkg/tracing/tracing_test.go
+++ b/pkg/tracing/tracing_test.go
@@ -144,7 +144,7 @@ func TestStartSpanFromContext_logger(t *testing.T) {
wantTraceID := span.Context().(jaeger.SpanContext).TraceID()
logger.Info("msg")
- data := make(map[string]interface{})
+ data := make(map[string]any)
if err := json.Unmarshal(buf.Bytes(), &data); err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -192,7 +192,7 @@ func TestNewLoggerWithTraceID(t *testing.T) {
wantTraceID := span.Context().(jaeger.SpanContext).TraceID()
logger.Info("msg")
- data := make(map[string]interface{})
+ data := make(map[string]any)
if err := json.Unmarshal(buf.Bytes(), &data); err != nil {
t.Fatalf("unexpected error: %v", err)
}
diff --git a/pkg/transaction/backend.go b/pkg/transaction/backend.go
index 0bf38c35975..075dbfe19b8 100644
--- a/pkg/transaction/backend.go
+++ b/pkg/transaction/backend.go
@@ -14,27 +14,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/transaction/backend"
)
// Backend is the minimum of blockchain backend functions we need.
type Backend interface {
- CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error)
- CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
- HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
- PendingNonceAt(ctx context.Context, account common.Address) (uint64, error)
- SuggestGasPrice(ctx context.Context) (*big.Int, error)
- SuggestGasTipCap(ctx context.Context) (*big.Int, error)
- EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error)
- SendTransaction(ctx context.Context, tx *types.Transaction) error
- TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
- TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error)
- BlockNumber(ctx context.Context) (uint64, error)
- BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error)
- BalanceAt(ctx context.Context, address common.Address, block *big.Int) (*big.Int, error)
- NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error)
- FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error)
- ChainID(ctx context.Context) (*big.Int, error)
- Close() error
+ backend.Geth
+ SuggestedFeeAndTip(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error)
}
// IsSynced will check if we are synced with the given blockchain backend. This
@@ -42,11 +28,7 @@ type Backend interface {
// with the given maxDelay as the maximum duration we can be behind the block
// time.
func IsSynced(ctx context.Context, backend Backend, maxDelay time.Duration) (bool, time.Time, error) {
- number, err := backend.BlockNumber(ctx)
- if err != nil {
- return false, time.Time{}, err
- }
- header, err := backend.HeaderByNumber(ctx, big.NewInt(int64(number)))
+ header, err := backend.HeaderByNumber(ctx, nil)
if errors.Is(err, ethereum.NotFound) {
return false, time.Time{}, nil
}
diff --git a/pkg/transaction/backend/backend.go b/pkg/transaction/backend/backend.go
new file mode 100644
index 00000000000..827e7f8176a
--- /dev/null
+++ b/pkg/transaction/backend/backend.go
@@ -0,0 +1,32 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "context"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// Geth is the interface that an ethclient.Client satisfies.
+type Geth interface {
+ BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error)
+ BlockNumber(ctx context.Context) (uint64, error)
+ CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
+ ChainID(ctx context.Context) (*big.Int, error)
+ Close()
+ EstimateGasAtBlock(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (uint64, error)
+ FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error)
+ HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
+ NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error)
+ PendingNonceAt(ctx context.Context, account common.Address) (uint64, error)
+ SendTransaction(ctx context.Context, tx *types.Transaction) error
+ SuggestGasTipCap(ctx context.Context) (*big.Int, error)
+ TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error)
+ TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
+}
diff --git a/pkg/transaction/backend_test.go b/pkg/transaction/backend_test.go
index d9d09e55b3c..313db5d9b9f 100644
--- a/pkg/transaction/backend_test.go
+++ b/pkg/transaction/backend_test.go
@@ -22,7 +22,6 @@ func TestIsSynced(t *testing.T) {
maxDelay := 10 * time.Second
now := time.Now().UTC()
ctx := context.Background()
- blockNumber := uint64(100)
t.Run("synced", func(t *testing.T) {
t.Parallel()
@@ -30,12 +29,9 @@ func TestIsSynced(t *testing.T) {
synced, _, err := transaction.IsSynced(
ctx,
backendmock.New(
- backendmock.WithBlockNumberFunc(func(c context.Context) (uint64, error) {
- return blockNumber, nil
- }),
backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- if number.Uint64() != blockNumber {
- return nil, errors.New("called with wrong block number")
+ if number != nil {
+ return nil, errors.New("latest block should be called with nil")
}
return &types.Header{
Time: uint64(now.Unix()),
@@ -58,12 +54,9 @@ func TestIsSynced(t *testing.T) {
synced, _, err := transaction.IsSynced(
ctx,
backendmock.New(
- backendmock.WithBlockNumberFunc(func(c context.Context) (uint64, error) {
- return blockNumber, nil
- }),
backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- if number.Uint64() != blockNumber {
- return nil, errors.New("called with wrong block number")
+ if number != nil {
+ return nil, errors.New("latest block should be called with nil")
}
return &types.Header{
Time: uint64(now.Add(-maxDelay).Unix()),
@@ -87,12 +80,9 @@ func TestIsSynced(t *testing.T) {
_, _, err := transaction.IsSynced(
ctx,
backendmock.New(
- backendmock.WithBlockNumberFunc(func(c context.Context) (uint64, error) {
- return blockNumber, nil
- }),
backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- if number.Uint64() != blockNumber {
- return nil, errors.New("called with wrong block number")
+ if number != nil {
+ return nil, errors.New("latest block should be called with nil")
}
return nil, expectedErr
}),
diff --git a/pkg/transaction/backendmock/backend.go b/pkg/transaction/backendmock/backend.go
index fea46f3936b..e07095fd352 100644
--- a/pkg/transaction/backendmock/backend.go
+++ b/pkg/transaction/backendmock/backend.go
@@ -16,29 +16,20 @@ import (
)
type backendMock struct {
- codeAt func(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error)
callContract func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
sendTransaction func(ctx context.Context, tx *types.Transaction) error
- suggestGasPrice func(ctx context.Context) (*big.Int, error)
+ suggestedFeeAndTip func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error)
suggestGasTipCap func(ctx context.Context) (*big.Int, error)
- estimateGas func(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error)
+ estimateGasAtBlock func(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (gas uint64, err error)
transactionReceipt func(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
pendingNonceAt func(ctx context.Context, account common.Address) (uint64, error)
transactionByHash func(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error)
blockNumber func(ctx context.Context) (uint64, error)
- blockByNumber func(ctx context.Context, number *big.Int) (*types.Block, error)
headerByNumber func(ctx context.Context, number *big.Int) (*types.Header, error)
balanceAt func(ctx context.Context, address common.Address, block *big.Int) (*big.Int, error)
nonceAt func(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error)
}
-func (m *backendMock) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
- if m.codeAt != nil {
- return m.codeAt(ctx, contract, blockNumber)
- }
- return nil, errors.New("not implemented")
-}
-
func (m *backendMock) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
if m.callContract != nil {
return m.callContract(ctx, call, blockNumber)
@@ -46,10 +37,6 @@ func (m *backendMock) CallContract(ctx context.Context, call ethereum.CallMsg, b
return nil, errors.New("not implemented")
}
-func (*backendMock) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) {
- return nil, errors.New("not implemented")
-}
-
func (m *backendMock) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
if m.pendingNonceAt != nil {
return m.pendingNonceAt(ctx, account)
@@ -57,16 +44,16 @@ func (m *backendMock) PendingNonceAt(ctx context.Context, account common.Address
return 0, errors.New("not implemented")
}
-func (m *backendMock) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
- if m.suggestGasPrice != nil {
- return m.suggestGasPrice(ctx)
+func (m *backendMock) SuggestedFeeAndTip(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ if m.suggestedFeeAndTip != nil {
+ return m.suggestedFeeAndTip(ctx, gasPrice, boostPercent)
}
- return nil, errors.New("not implemented")
+ return nil, nil, errors.New("not implemented")
}
-func (m *backendMock) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
- if m.estimateGas != nil {
- return m.estimateGas(ctx, call)
+func (m *backendMock) EstimateGasAtBlock(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (uint64, error) {
+ if m.estimateGasAtBlock != nil {
+ return m.estimateGasAtBlock(ctx, msg, blockNumber)
}
return 0, errors.New("not implemented")
}
@@ -82,10 +69,6 @@ func (*backendMock) FilterLogs(ctx context.Context, query ethereum.FilterQuery)
return nil, errors.New("not implemented")
}
-func (*backendMock) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
- return nil, errors.New("not implemented")
-}
-
func (m *backendMock) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
if m.transactionReceipt != nil {
return m.transactionReceipt(ctx, txHash)
@@ -107,13 +90,6 @@ func (m *backendMock) BlockNumber(ctx context.Context) (uint64, error) {
return 0, errors.New("not implemented")
}
-func (m *backendMock) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
- if m.blockByNumber != nil {
- return m.blockByNumber(ctx, number)
- }
- return nil, errors.New("not implemented")
-}
-
func (m *backendMock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
if m.headerByNumber != nil {
return m.headerByNumber(ctx, number)
@@ -146,9 +122,7 @@ func (m *backendMock) ChainID(ctx context.Context) (*big.Int, error) {
return nil, errors.New("not implemented")
}
-func (m *backendMock) Close() error {
- return nil
-}
+func (m *backendMock) Close() {}
func New(opts ...Option) transaction.Backend {
mock := new(backendMock)
@@ -173,12 +147,6 @@ func WithCallContractFunc(f func(ctx context.Context, call ethereum.CallMsg, blo
})
}
-func WithCodeAtFunc(f func(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error)) Option {
- return optionFunc(func(s *backendMock) {
- s.codeAt = f
- })
-}
-
func WithBalanceAt(f func(ctx context.Context, address common.Address, block *big.Int) (*big.Int, error)) Option {
return optionFunc(func(s *backendMock) {
s.balanceAt = f
@@ -191,9 +159,9 @@ func WithPendingNonceAtFunc(f func(ctx context.Context, account common.Address)
})
}
-func WithSuggestGasPriceFunc(f func(ctx context.Context) (*big.Int, error)) Option {
+func WithSuggestedFeeAndTipFunc(f func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error)) Option {
return optionFunc(func(s *backendMock) {
- s.suggestGasPrice = f
+ s.suggestedFeeAndTip = f
})
}
@@ -203,9 +171,9 @@ func WithSuggestGasTipCapFunc(f func(ctx context.Context) (*big.Int, error)) Opt
})
}
-func WithEstimateGasFunc(f func(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error)) Option {
+func WithEstimateGasAtBlockFunc(f func(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (gas uint64, err error)) Option {
return optionFunc(func(s *backendMock) {
- s.estimateGas = f
+ s.estimateGasAtBlock = f
})
}
@@ -221,12 +189,6 @@ func WithTransactionByHashFunc(f func(ctx context.Context, txHash common.Hash) (
})
}
-func WithBlockByNumberFunc(f func(ctx context.Context, number *big.Int) (*types.Block, error)) Option {
- return optionFunc(func(s *backendMock) {
- s.blockByNumber = f
- })
-}
-
func WithSendTransactionFunc(f func(ctx context.Context, tx *types.Transaction) error) Option {
return optionFunc(func(s *backendMock) {
s.sendTransaction = f
diff --git a/pkg/transaction/backendnoop/backend.go b/pkg/transaction/backendnoop/backend.go
new file mode 100644
index 00000000000..cc09b459bd2
--- /dev/null
+++ b/pkg/transaction/backendnoop/backend.go
@@ -0,0 +1,94 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backendnoop
+
+import (
+ "context"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethersphere/bee/v2/pkg/postage/postagecontract"
+ "github.com/ethersphere/bee/v2/pkg/transaction"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var _ transaction.Backend = (*Backend)(nil)
+
+// Backend is a no-op implementation for transaction.Backend interface.
+// It's used when the blockchain functionality is disabled.
+type Backend struct {
+ chainID int64
+}
+
+// New creates a new no-op backend with the specified chain ID.
+func New(chainID int64) transaction.Backend {
+ return &Backend{
+ chainID: chainID,
+ }
+}
+
+func (b *Backend) Metrics() []prometheus.Collector {
+ return nil
+}
+
+func (b *Backend) CallContract(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) HeaderByNumber(context.Context, *big.Int) (*types.Header, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) PendingNonceAt(context.Context, common.Address) (uint64, error) {
+ return 0, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) SuggestedFeeAndTip(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return nil, nil, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) SuggestGasTipCap(context.Context) (*big.Int, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) EstimateGasAtBlock(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (uint64, error) {
+ return 0, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) SendTransaction(context.Context, *types.Transaction) error {
+ return postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) TransactionReceipt(context.Context, common.Hash) (*types.Receipt, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) TransactionByHash(context.Context, common.Hash) (tx *types.Transaction, isPending bool, err error) {
+ return nil, false, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) BlockNumber(context.Context) (uint64, error) {
+ return 0, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) BalanceAt(context.Context, common.Address, *big.Int) (*big.Int, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) NonceAt(context.Context, common.Address, *big.Int) (uint64, error) {
+ return 0, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+func (b *Backend) ChainID(context.Context) (*big.Int, error) {
+ return big.NewInt(b.chainID), nil
+}
+
+func (b *Backend) Close() {}
diff --git a/pkg/transaction/backendsimulation/backend.go b/pkg/transaction/backendsimulation/backend.go
index 58c183fa0bd..eacddc44f0f 100644
--- a/pkg/transaction/backendsimulation/backend.go
+++ b/pkg/transaction/backendsimulation/backend.go
@@ -7,6 +7,7 @@ package backendsimulation
import (
"context"
"errors"
+ "maps"
"math/big"
"github.com/ethereum/go-ethereum"
@@ -74,43 +75,27 @@ func (m *simulatedBackend) advanceBlock() {
m.blockNumber = block.Number
if block.Receipts != nil {
- for hash, receipt := range block.Receipts {
- m.receipts[hash] = receipt
- }
+ maps.Copy(m.receipts, block.Receipts)
}
if block.NoncesAt != nil {
- for addr, nonce := range block.NoncesAt {
- m.noncesAt[addr] = nonce
- }
+ maps.Copy(m.noncesAt, block.NoncesAt)
}
}
-func (m *simulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
- return nil, errors.New("not implemented")
-}
-
-func (m *simulatedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
- return nil, errors.New("not implemented")
-}
-
func (*simulatedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
return nil, errors.New("not implemented")
}
-func (*simulatedBackend) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) {
- return nil, errors.New("not implemented")
-}
-
func (m *simulatedBackend) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
return 0, errors.New("not implemented")
}
-func (m *simulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
- return nil, errors.New("not implemented")
+func (m *simulatedBackend) SuggestedFeeAndTip(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return nil, nil, errors.New("not implemented")
}
-func (m *simulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
+func (m *simulatedBackend) EstimateGasAtBlock(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (uint64, error) {
return 0, errors.New("not implemented")
}
@@ -122,10 +107,6 @@ func (*simulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQu
return nil, errors.New("not implemented")
}
-func (*simulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
- return nil, errors.New("not implemented")
-}
-
func (m *simulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
receipt, ok := m.receipts[txHash]
if ok {
@@ -169,6 +150,4 @@ func (m *simulatedBackend) ChainID(ctx context.Context) (*big.Int, error) {
return nil, errors.New("not implemented")
}
-func (m *simulatedBackend) Close() error {
- return nil
-}
+func (m *simulatedBackend) Close() {}
diff --git a/pkg/transaction/event.go b/pkg/transaction/event.go
index 8eb2001ebf7..587b3d86427 100644
--- a/pkg/transaction/event.go
+++ b/pkg/transaction/event.go
@@ -18,7 +18,7 @@ var (
)
// ParseEvent will parse the specified abi event from the given log
-func ParseEvent(a *abi.ABI, eventName string, c interface{}, e types.Log) error {
+func ParseEvent(a *abi.ABI, eventName string, c any, e types.Log) error {
if len(e.Topics) == 0 {
return ErrNoTopic
}
@@ -37,7 +37,7 @@ func ParseEvent(a *abi.ABI, eventName string, c interface{}, e types.Log) error
}
// FindSingleEvent will find the first event of the given kind.
-func FindSingleEvent(abi *abi.ABI, receipt *types.Receipt, contractAddress common.Address, event abi.Event, out interface{}) error {
+func FindSingleEvent(abi *abi.ABI, receipt *types.Receipt, contractAddress common.Address, event abi.Event, out any) error {
if receipt.Status != 1 {
return ErrTransactionReverted
}
diff --git a/pkg/transaction/mock/transaction.go b/pkg/transaction/mock/transaction.go
index d987fd52249..072f47cf8f2 100644
--- a/pkg/transaction/mock/transaction.go
+++ b/pkg/transaction/mock/transaction.go
@@ -171,10 +171,10 @@ type Call struct {
to common.Address
result []byte
method string
- params []interface{}
+ params []any
}
-func ABICall(abi *abi.ABI, to common.Address, result []byte, method string, params ...interface{}) Call {
+func ABICall(abi *abi.ABI, to common.Address, result []byte, method string, params ...any) Call {
return Call{
to: to,
abi: abi,
@@ -216,11 +216,11 @@ func WithABICallSequence(calls ...Call) Option {
})
}
-func WithABICall(abi *abi.ABI, to common.Address, result []byte, method string, params ...interface{}) Option {
+func WithABICall(abi *abi.ABI, to common.Address, result []byte, method string, params ...any) Option {
return WithABICallSequence(ABICall(abi, to, result, method, params...))
}
-func WithABISend(abi *abi.ABI, txHash common.Hash, expectedAddress common.Address, expectedValue *big.Int, method string, params ...interface{}) Option {
+func WithABISend(abi *abi.ABI, txHash common.Hash, expectedAddress common.Address, expectedValue *big.Int, method string, params ...any) Option {
return optionFunc(func(s *transactionServiceMock) {
s.send = func(ctx context.Context, request *transaction.TxRequest, boost int) (common.Hash, error) {
data, err := abi.Pack(method, params...)
diff --git a/pkg/transaction/transaction.go b/pkg/transaction/transaction.go
index 2d911e27261..cc88e7a3ab4 100644
--- a/pkg/transaction/transaction.go
+++ b/pkg/transaction/transaction.go
@@ -14,6 +14,8 @@ import (
"sync"
"time"
+ "context"
+
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
@@ -23,14 +25,12 @@ import (
"github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/sctx"
"github.com/ethersphere/bee/v2/pkg/storage"
- "golang.org/x/net/context"
)
// loggerName is the tree path name of the logger for this package.
const loggerName = "transaction"
const (
- noncePrefix = "transaction_nonce_"
storedTransactionPrefix = "transaction_stored_"
pendingTransactionPrefix = "transaction_pending_"
)
@@ -41,14 +41,11 @@ var (
ErrTransactionReverted = errors.New("transaction reverted")
ErrUnknownTransaction = errors.New("unknown transaction")
ErrAlreadyImported = errors.New("already imported")
- ErrEIP1559NotSupported = errors.New("network does not appear to support EIP-1559 (no baseFee)")
)
const (
- DefaultGasLimit = 1_000_000
- DefaultTipBoostPercent = 25
- MinimumGasTipCap = 1_500_000_000 // 1.5 Gwei
- RedistributionTipBoostPercent = 50
+ DefaultGasLimit = 1_000_000
+ DefaultTipBoostPercent = 25
)
// TxRequest describes a request for a transaction that can be executed.
@@ -144,8 +141,7 @@ func NewService(logger log.Logger, overlayEthAddress common.Address, backend Bac
monitor: monitor,
}
- err = t.waitForAllPendingTx()
- if err != nil {
+ if err = t.waitForAllPendingTx(); err != nil {
return nil, err
}
@@ -226,9 +222,7 @@ func (t *transactionService) Send(ctx context.Context, request *TxRequest, boost
}
func (t *transactionService) waitForPendingTx(txHash common.Hash) {
- t.wg.Add(1)
- go func() {
- defer t.wg.Done()
+ t.wg.Go(func() {
switch _, err := t.WaitForReceipt(t.ctx, txHash); err {
case nil:
t.logger.Info("pending transaction confirmed", "tx", txHash)
@@ -243,7 +237,7 @@ func (t *transactionService) waitForPendingTx(txHash common.Hash) {
t.logger.Error(err, "waiting for pending transaction failed", "tx", txHash)
}
}
- }()
+ })
}
func (t *transactionService) Call(ctx context.Context, request *TxRequest) ([]byte, error) {
@@ -279,11 +273,11 @@ func (t *transactionService) StoredTransaction(txHash common.Hash) (*StoredTrans
func (t *transactionService) prepareTransaction(ctx context.Context, request *TxRequest, nonce uint64, boostPercent int) (tx *types.Transaction, err error) {
var gasLimit uint64
if request.GasLimit == 0 {
- gasLimit, err = t.backend.EstimateGas(ctx, ethereum.CallMsg{
+ gasLimit, err = t.backend.EstimateGasAtBlock(ctx, ethereum.CallMsg{
From: t.sender,
To: request.To,
Data: request.Data,
- })
+ }, nil) // nil for latest block
if err != nil {
t.logger.Debug("estimate gas failed", "error", err)
gasLimit = request.MinEstimatedGasLimit
@@ -309,7 +303,7 @@ func (t *transactionService) prepareTransaction(ctx context.Context, request *Tx
notice that gas price does not exceed 20 as defined by max fee.
*/
- gasFeeCap, gasTipCap, err := t.suggestedFeeAndTip(ctx, request.GasPrice, boostPercent)
+ gasFeeCap, gasTipCap, err := t.backend.SuggestedFeeAndTip(ctx, request.GasPrice, boostPercent)
if err != nil {
return nil, err
}
@@ -326,51 +320,6 @@ func (t *transactionService) prepareTransaction(ctx context.Context, request *Tx
}), nil
}
-func (t *transactionService) suggestedFeeAndTip(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
- gasTipCap, err := t.backend.SuggestGasTipCap(ctx)
- if err != nil {
- return nil, nil, err
- }
-
- multiplier := big.NewInt(int64(boostPercent) + 100)
- gasTipCap = new(big.Int).Div(new(big.Int).Mul(gasTipCap, multiplier), big.NewInt(100))
-
- minimumTip := big.NewInt(MinimumGasTipCap)
- if gasTipCap.Cmp(minimumTip) < 0 {
- gasTipCap = new(big.Int).Set(minimumTip)
- }
-
- var gasFeeCap *big.Int
-
- if gasPrice == nil {
- latestBlockHeader, err := t.backend.HeaderByNumber(ctx, nil)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to get latest block: %w", err)
- }
-
- if latestBlockHeader.BaseFee == nil {
- return nil, nil, ErrEIP1559NotSupported
- }
-
- // gasFeeCap = (2 * baseFee) + gasTipCap
- gasFeeCap = new(big.Int).Add(
- new(big.Int).Mul(latestBlockHeader.BaseFee, big.NewInt(2)),
- gasTipCap,
- )
- } else {
- gasFeeCap = new(big.Int).Set(gasPrice)
- }
-
- if gasTipCap.Cmp(gasFeeCap) > 0 {
- t.logger.Warning("gas tip cap is higher than gas fee cap, using gas fee cap as gas tip cap", "gas_tip_cap", gasTipCap, "gas_fee_cap", gasFeeCap)
- gasTipCap = new(big.Int).Set(gasFeeCap)
- }
-
- t.logger.Debug("prepare transaction", "gas_max_fee", gasFeeCap, "gas_max_tip", gasTipCap)
-
- return gasFeeCap, gasTipCap, nil
-}
-
func storedTransactionKey(txHash common.Hash) string {
return fmt.Sprintf("%s%x", storedTransactionPrefix, txHash)
}
@@ -394,7 +343,7 @@ func (t *transactionService) nextNonce(ctx context.Context) (uint64, error) {
// PendingNonceAt returns the nonce we should use, but we will
// compare this to our pending tx list, therefore the -1.
- var maxNonce = onchainNonce - 1
+ maxNonce := onchainNonce - 1
for _, txHash := range pendingTxs {
trx, _, err := t.backend.TransactionByHash(ctx, txHash)
if err != nil {
@@ -441,7 +390,7 @@ func (t *transactionService) WatchSentTransaction(txHash common.Hash) (<-chan ty
}
func (t *transactionService) PendingTransactions() ([]common.Hash, error) {
- var txHashes = make([]common.Hash, 0)
+ txHashes := make([]common.Hash, 0)
err := t.store.Iterate(pendingTransactionPrefix, func(key, value []byte) (stop bool, err error) {
txHash := common.HexToHash(strings.TrimPrefix(string(key), pendingTransactionPrefix))
txHashes = append(txHashes, txHash)
@@ -491,7 +440,7 @@ func (t *transactionService) ResendTransaction(ctx context.Context, txHash commo
return err
}
- gasFeeCap, gasTipCap, err := t.suggestedFeeAndTip(ctx, sctx.GetGasPrice(ctx), storedTransaction.GasTipBoost)
+ gasFeeCap, gasTipCap, err := t.backend.SuggestedFeeAndTip(ctx, sctx.GetGasPrice(ctx), storedTransaction.GasTipBoost)
if err != nil {
return err
}
@@ -531,7 +480,7 @@ func (t *transactionService) CancelTransaction(ctx context.Context, originalTxHa
return common.Hash{}, err
}
- gasFeeCap, gasTipCap, err := t.suggestedFeeAndTip(ctx, sctx.GetGasPrice(ctx), 0)
+ gasFeeCap, gasTipCap, err := t.backend.SuggestedFeeAndTip(ctx, sctx.GetGasPrice(ctx), 0)
if err != nil {
return common.Hash{}, err
}
@@ -645,9 +594,9 @@ func (t *transactionService) UnwrapABIError(ctx context.Context, req *TxRequest,
continue
}
- values, ok := data.([]interface{})
+ values, ok := data.([]any)
if !ok {
- values = make([]interface{}, len(abiError.Inputs))
+ values = make([]any, len(abiError.Inputs))
for i := range values {
values[i] = "?"
}
diff --git a/pkg/transaction/transaction_test.go b/pkg/transaction/transaction_test.go
index d8b830e5780..e9f4e385c32 100644
--- a/pkg/transaction/transaction_test.go
+++ b/pkg/transaction/transaction_test.go
@@ -31,7 +31,7 @@ import (
)
var (
- minimumTip = big.NewInt(transaction.MinimumGasTipCap)
+ minimumTip = big.NewInt(1_500_000_000)
baseFee = big.NewInt(3_000_000_000)
)
@@ -75,6 +75,39 @@ func signerMockForTransaction(t *testing.T, signedTx *types.Transaction, sender
)
}
+func checkStoredTransaction(t *testing.T, transactionService transaction.Service, txHash common.Hash, request *transaction.TxRequest, recipient common.Address, gasLimit uint64, gasPrice *big.Int, nonce uint64) {
+ t.Helper()
+
+ storedTransaction, err := transactionService.StoredTransaction(txHash)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if storedTransaction.To == nil || *storedTransaction.To != recipient {
+ t.Fatalf("got wrong recipient in stored transaction. wanted %x, got %x", recipient, storedTransaction.To)
+ }
+
+ if !bytes.Equal(storedTransaction.Data, request.Data) {
+ t.Fatalf("got wrong data in stored transaction. wanted %x, got %x", request.Data, storedTransaction.Data)
+ }
+
+ if storedTransaction.Description != request.Description {
+ t.Fatalf("got wrong description in stored transaction. wanted %x, got %x", request.Description, storedTransaction.Description)
+ }
+
+ if storedTransaction.GasLimit != gasLimit {
+ t.Fatalf("got wrong gas limit in stored transaction. wanted %d, got %d", gasLimit, storedTransaction.GasLimit)
+ }
+
+ if gasPrice.Cmp(storedTransaction.GasPrice) != 0 {
+ t.Fatalf("got wrong gas price in stored transaction. wanted %d, got %d", gasPrice, storedTransaction.GasPrice)
+ }
+
+ if storedTransaction.Nonce != nonce {
+ t.Fatalf("got wrong nonce in stored transaction. wanted %d, got %d", nonce, storedTransaction.Nonce)
+ }
+}
+
func TestTransactionSend(t *testing.T) {
t.Parallel()
@@ -118,11 +151,11 @@ func TestTransactionSend(t *testing.T) {
}
return nil
}),
- backendmock.WithEstimateGasFunc(func(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
- if !bytes.Equal(call.To.Bytes(), recipient.Bytes()) {
- t.Fatalf("estimating with wrong recipient. wanted %x, got %x", recipient, call.To)
+ backendmock.WithEstimateGasAtBlockFunc(func(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (gas uint64, err error) {
+ if !bytes.Equal(msg.To.Bytes(), recipient.Bytes()) {
+ t.Fatalf("estimating with wrong recipient. wanted %x, got %x", recipient, msg.To)
}
- if !bytes.Equal(call.Data, txData) {
+ if !bytes.Equal(msg.Data, txData) {
t.Fatal("estimating with wrong data")
}
return estimatedGasLimit, nil
@@ -130,11 +163,8 @@ func TestTransactionSend(t *testing.T) {
backendmock.WithPendingNonceAtFunc(func(ctx context.Context, account common.Address) (uint64, error) {
return nonce - 1, nil
}),
- backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
- return suggestedGasTip, nil
- }),
- backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- return &types.Header{BaseFee: baseFee}, nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return gasFeeCap, suggestedGasTip, nil
}),
),
signerMockForTransaction(t, signedTx, sender, chainID),
@@ -160,34 +190,7 @@ func TestTransactionSend(t *testing.T) {
t.Fatal("returning wrong transaction hash")
}
- storedTransaction, err := transactionService.StoredTransaction(txHash)
- if err != nil {
- t.Fatal(err)
- }
-
- if storedTransaction.To == nil || *storedTransaction.To != recipient {
- t.Fatalf("got wrong recipient in stored transaction. wanted %x, got %x", recipient, storedTransaction.To)
- }
-
- if !bytes.Equal(storedTransaction.Data, request.Data) {
- t.Fatalf("got wrong data in stored transaction. wanted %x, got %x", request.Data, storedTransaction.Data)
- }
-
- if storedTransaction.Description != request.Description {
- t.Fatalf("got wrong description in stored transaction. wanted %x, got %x", request.Description, storedTransaction.Description)
- }
-
- if storedTransaction.GasLimit != gasLimit {
- t.Fatalf("got wrong gas limit in stored transaction. wanted %d, got %d", gasLimit, storedTransaction.GasLimit)
- }
-
- if gasFeeCap.Cmp(storedTransaction.GasPrice) != 0 {
- t.Fatalf("got wrong gas price in stored transaction. wanted %d, got %d", gasFeeCap, storedTransaction.GasPrice)
- }
-
- if storedTransaction.Nonce != nonce {
- t.Fatalf("got wrong nonce in stored transaction. wanted %d, got %d", nonce, storedTransaction.Nonce)
- }
+ checkStoredTransaction(t, transactionService, txHash, request, recipient, gasLimit, gasFeeCap, nonce)
pending, err := transactionService.PendingTransactions()
if err != nil {
@@ -231,17 +234,14 @@ func TestTransactionSend(t *testing.T) {
}
return nil
}),
- backendmock.WithEstimateGasFunc(func(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
+ backendmock.WithEstimateGasAtBlockFunc(func(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (gas uint64, err error) {
return 0, errors.New("estimate failure")
}),
backendmock.WithPendingNonceAtFunc(func(ctx context.Context, account common.Address) (uint64, error) {
return nonce - 1, nil
}),
- backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
- return suggestedGasTip, nil
- }),
- backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- return &types.Header{BaseFee: baseFee}, nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return gasFeeCap, suggestedGasTip, nil
}),
),
signerMockForTransaction(t, signedTx, sender, chainID),
@@ -267,34 +267,7 @@ func TestTransactionSend(t *testing.T) {
t.Fatal("returning wrong transaction hash")
}
- storedTransaction, err := transactionService.StoredTransaction(txHash)
- if err != nil {
- t.Fatal(err)
- }
-
- if storedTransaction.To == nil || *storedTransaction.To != recipient {
- t.Fatalf("got wrong recipient in stored transaction. wanted %x, got %x", recipient, storedTransaction.To)
- }
-
- if !bytes.Equal(storedTransaction.Data, request.Data) {
- t.Fatalf("got wrong data in stored transaction. wanted %x, got %x", request.Data, storedTransaction.Data)
- }
-
- if storedTransaction.Description != request.Description {
- t.Fatalf("got wrong description in stored transaction. wanted %x, got %x", request.Description, storedTransaction.Description)
- }
-
- if storedTransaction.GasLimit != gasLimit {
- t.Fatalf("got wrong gas limit in stored transaction. wanted %d, got %d", gasLimit, storedTransaction.GasLimit)
- }
-
- if gasFeeCap.Cmp(storedTransaction.GasPrice) != 0 {
- t.Fatalf("got wrong gas price in stored transaction. wanted %d, got %d", gasFeeCap, storedTransaction.GasPrice)
- }
-
- if storedTransaction.Nonce != nonce {
- t.Fatalf("got wrong nonce in stored transaction. wanted %d, got %d", nonce, storedTransaction.Nonce)
- }
+ checkStoredTransaction(t, transactionService, txHash, request, recipient, gasLimit, gasFeeCap, nonce)
pending, err := transactionService.PendingTransactions()
if err != nil {
@@ -341,11 +314,11 @@ func TestTransactionSend(t *testing.T) {
}
return nil
}),
- backendmock.WithEstimateGasFunc(func(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
- if !bytes.Equal(call.To.Bytes(), recipient.Bytes()) {
- t.Fatalf("estimating with wrong recipient. wanted %x, got %x", recipient, call.To)
+ backendmock.WithEstimateGasAtBlockFunc(func(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (gas uint64, err error) {
+ if !bytes.Equal(msg.To.Bytes(), recipient.Bytes()) {
+ t.Fatalf("estimating with wrong recipient. wanted %x, got %x", recipient, msg.To)
}
- if !bytes.Equal(call.Data, txData) {
+ if !bytes.Equal(msg.Data, txData) {
t.Fatal("estimating with wrong data")
}
return estimatedGasLimit, nil
@@ -353,11 +326,8 @@ func TestTransactionSend(t *testing.T) {
backendmock.WithPendingNonceAtFunc(func(ctx context.Context, account common.Address) (uint64, error) {
return nonce - 1, nil
}),
- backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
- return suggestedGasTip, nil
- }),
- backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- return &types.Header{BaseFee: baseFee}, nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return gasFeeCapWithBoost, suggestedGasTip, nil
}),
),
signerMockForTransaction(t, signedTx, sender, chainID),
@@ -383,34 +353,7 @@ func TestTransactionSend(t *testing.T) {
t.Fatal("returning wrong transaction hash")
}
- storedTransaction, err := transactionService.StoredTransaction(txHash)
- if err != nil {
- t.Fatal(err)
- }
-
- if storedTransaction.To == nil || *storedTransaction.To != recipient {
- t.Fatalf("got wrong recipient in stored transaction. wanted %x, got %x", recipient, storedTransaction.To)
- }
-
- if !bytes.Equal(storedTransaction.Data, request.Data) {
- t.Fatalf("got wrong data in stored transaction. wanted %x, got %x", request.Data, storedTransaction.Data)
- }
-
- if storedTransaction.Description != request.Description {
- t.Fatalf("got wrong description in stored transaction. wanted %x, got %x", request.Description, storedTransaction.Description)
- }
-
- if storedTransaction.GasLimit != gasLimit {
- t.Fatalf("got wrong gas limit in stored transaction. wanted %d, got %d", gasLimit, storedTransaction.GasLimit)
- }
-
- if gasFeeCapWithBoost.Cmp(storedTransaction.GasPrice) != 0 {
- t.Fatalf("got wrong gas price in stored transaction. wanted %d, got %d", gasFeeCapWithBoost, storedTransaction.GasPrice)
- }
-
- if storedTransaction.Nonce != nonce {
- t.Fatalf("got wrong nonce in stored transaction. wanted %d, got %d", nonce, storedTransaction.Nonce)
- }
+ checkStoredTransaction(t, transactionService, txHash, request, recipient, gasLimit, gasFeeCapWithBoost, nonce)
pending, err := transactionService.PendingTransactions()
if err != nil {
@@ -453,11 +396,11 @@ func TestTransactionSend(t *testing.T) {
}
return nil
}),
- backendmock.WithEstimateGasFunc(func(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
- if !bytes.Equal(call.To.Bytes(), recipient.Bytes()) {
- t.Fatalf("estimating with wrong recipient. wanted %x, got %x", recipient, call.To)
+ backendmock.WithEstimateGasAtBlockFunc(func(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (gas uint64, err error) {
+ if !bytes.Equal(msg.To.Bytes(), recipient.Bytes()) {
+ t.Fatalf("estimating with wrong recipient. wanted %x, got %x", recipient, msg.To)
}
- if !bytes.Equal(call.Data, txData) {
+ if !bytes.Equal(msg.Data, txData) {
t.Fatal("estimating with wrong data")
}
return estimatedGasLimit, nil
@@ -465,11 +408,8 @@ func TestTransactionSend(t *testing.T) {
backendmock.WithPendingNonceAtFunc(func(ctx context.Context, account common.Address) (uint64, error) {
return nonce, nil
}),
- backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
- return suggestedGasTip, nil
- }),
- backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- return &types.Header{BaseFee: baseFee}, nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return gasFeeCap, suggestedGasTip, nil
}),
),
signerMockForTransaction(t, signedTx, sender, chainID),
@@ -521,7 +461,7 @@ func TestTransactionSend(t *testing.T) {
}
return nil
}),
- backendmock.WithEstimateGasFunc(func(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
+ backendmock.WithEstimateGasAtBlockFunc(func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) (gas uint64, err error) {
if !bytes.Equal(call.To.Bytes(), recipient.Bytes()) {
t.Fatalf("estimating with wrong recipient. wanted %x, got %x", recipient, call.To)
}
@@ -533,11 +473,8 @@ func TestTransactionSend(t *testing.T) {
backendmock.WithPendingNonceAtFunc(func(ctx context.Context, account common.Address) (uint64, error) {
return nextNonce, nil
}),
- backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
- return suggestedGasTip, nil
- }),
- backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- return &types.Header{BaseFee: baseFee}, nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return gasFeeCap, suggestedGasTip, nil
}),
),
signerMockForTransaction(t, signedTx, sender, chainID),
@@ -590,7 +527,7 @@ func TestTransactionSend(t *testing.T) {
}
return nil
}),
- backendmock.WithEstimateGasFunc(func(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
+ backendmock.WithEstimateGasAtBlockFunc(func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) (gas uint64, err error) {
if !bytes.Equal(call.To.Bytes(), recipient.Bytes()) {
t.Fatalf("estimating with wrong recipient. wanted %x, got %x", recipient, call.To)
}
@@ -602,11 +539,8 @@ func TestTransactionSend(t *testing.T) {
backendmock.WithPendingNonceAtFunc(func(ctx context.Context, account common.Address) (uint64, error) {
return nextNonce, nil
}),
- backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
- return suggestedGasTip, nil
- }),
- backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- return &types.Header{BaseFee: baseFee}, nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return customGasFeeCap, customGasFeeCap, nil
}),
),
signerMockForTransaction(t, signedTx, sender, chainID),
@@ -748,11 +682,8 @@ func TestTransactionResend(t *testing.T) {
}
return nil
}),
- backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
- return gasTip, nil
- }),
- backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- return &types.Header{BaseFee: baseFee}, nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return gasFeeCap, gasTip, nil
}),
),
signerMockForTransaction(t, signedTx, recipient, chainID),
@@ -838,11 +769,8 @@ func TestTransactionCancel(t *testing.T) {
}
return nil
}),
- backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
- return gasTip, nil
- }),
- backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
- return &types.Header{BaseFee: baseFee}, nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return fee, minimumTip, nil
}),
),
signerMockForTransaction(t, cancelTx, recipient, chainID),
@@ -879,7 +807,7 @@ func TestTransactionCancel(t *testing.T) {
Value: big.NewInt(0),
Gas: 21000,
GasFeeCap: gasFeeCap,
- GasTipCap: gasTip,
+ GasTipCap: gasTipCap,
Data: []byte{},
})
@@ -891,8 +819,8 @@ func TestTransactionCancel(t *testing.T) {
}
return nil
}),
- backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
- return gasTip, nil
+ backendmock.WithSuggestedFeeAndTipFunc(func(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ return gasFee, gasTip, nil
}),
),
signerMockForTransaction(t, cancelTx, recipient, chainID),
@@ -924,9 +852,9 @@ type rpcAPIError struct {
err string
}
-func (e *rpcAPIError) ErrorCode() int { return e.code }
-func (e *rpcAPIError) Error() string { return e.msg }
-func (e *rpcAPIError) ErrorData() interface{} { return e.err }
+func (e *rpcAPIError) ErrorCode() int { return e.code }
+func (e *rpcAPIError) Error() string { return e.msg }
+func (e *rpcAPIError) ErrorData() any { return e.err }
var _ rpc.DataError = (*rpcAPIError)(nil)
diff --git a/pkg/transaction/wrapped/fee.go b/pkg/transaction/wrapped/fee.go
new file mode 100644
index 00000000000..34fe2cc60d5
--- /dev/null
+++ b/pkg/transaction/wrapped/fee.go
@@ -0,0 +1,85 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package wrapped
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+)
+
+const (
+ percentageDivisor = 100
+ baseFeeMultiplier = 2
+)
+
+var (
+ ErrEIP1559NotSupported = errors.New("network does not appear to support EIP-1559 (no baseFee)")
+)
+
+// SuggestedFeeAndTip calculates the recommended gasFeeCap (maxFeePerGas) and gasTipCap (maxPriorityFeePerGas) for a transaction.
+// If gasPrice is provided (legacy mode):
+// - On EIP-1559 networks: gasFeeCap = gasPrice; gasTipCap = max(gasPrice - baseFee, minimumTip) to respect the total cap while enforcing a tip floor where possible.
+// - On pre-EIP-1559 networks: returns (gasPrice, gasPrice) for legacy transaction compatibility.
+//
+// If gasPrice is nil: Uses suggested tip with optional boost, enforces minimum, and sets gasFeeCap = 2 * baseFee + gasTipCap.
+func (b *wrappedBackend) SuggestedFeeAndTip(ctx context.Context, gasPrice *big.Int, boostPercent int) (*big.Int, *big.Int, error) {
+ if gasPrice != nil {
+ latestBlockHeader, err := b.backend.HeaderByNumber(ctx, nil)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to get latest block header: %w", err)
+ }
+ if latestBlockHeader == nil || latestBlockHeader.BaseFee == nil {
+ return new(big.Int).Set(gasPrice), new(big.Int).Set(gasPrice), nil
+ }
+
+ baseFee := latestBlockHeader.BaseFee
+ if gasPrice.Cmp(baseFee) < 0 {
+ return nil, nil, fmt.Errorf("specified gas price %s is below current base fee %s", gasPrice, baseFee)
+ }
+
+ // nominal tip = gasPrice - baseFee
+ gasTipCap := new(big.Int).Sub(gasPrice, baseFee)
+ gasFeeCap := new(big.Int).Set(gasPrice)
+
+ return gasFeeCap, gasTipCap, nil
+ }
+
+ gasTipCap, err := b.backend.SuggestGasTipCap(ctx)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to suggest gas tip cap: %w", err)
+ }
+ gasTipCap = new(big.Int).Set(gasTipCap)
+
+ if boostPercent != 0 {
+ if boostPercent < 0 {
+ return nil, nil, fmt.Errorf("negative boostPercent (%d) not allowed", boostPercent)
+ }
+ // multiplier: 100 + boostPercent (e.g., 110 for 10% boost)
+ multiplier := new(big.Int).Add(big.NewInt(int64(percentageDivisor)), big.NewInt(int64(boostPercent)))
+ // gasTipCap = gasTipCap * (100 + boostPercent) / 100
+ gasTipCap.Mul(gasTipCap, multiplier).Div(gasTipCap, big.NewInt(int64(percentageDivisor)))
+ }
+
+ minimumTip := big.NewInt(b.minimumGasTipCap)
+ if gasTipCap.Cmp(minimumTip) < 0 {
+ gasTipCap.Set(minimumTip)
+ }
+
+ latestBlockHeader, err := b.backend.HeaderByNumber(ctx, nil)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to get latest block header: %w", err)
+ }
+ if latestBlockHeader == nil || latestBlockHeader.BaseFee == nil {
+ return nil, nil, ErrEIP1559NotSupported
+ }
+
+ // gasFeeCap = (2 * baseFee) + gasTipCap
+ gasFeeCap := new(big.Int).Mul(latestBlockHeader.BaseFee, big.NewInt(int64(baseFeeMultiplier)))
+ gasFeeCap.Add(gasFeeCap, gasTipCap)
+
+ return gasFeeCap, gasTipCap, nil
+}
diff --git a/pkg/transaction/wrapped/fee_test.go b/pkg/transaction/wrapped/fee_test.go
new file mode 100644
index 00000000000..41afb69f28f
--- /dev/null
+++ b/pkg/transaction/wrapped/fee_test.go
@@ -0,0 +1,135 @@
+// Copyright 2025 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package wrapped_test
+
+import (
+ "context"
+ "errors"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethersphere/bee/v2/pkg/transaction/backendmock"
+ "github.com/ethersphere/bee/v2/pkg/transaction/wrapped"
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestSuggestedFeeAndTip(t *testing.T) {
+ t.Parallel()
+
+ var (
+ ctx = context.Background()
+ minimumGasTipCap = uint64(10)
+ baseFee = big.NewInt(100)
+ )
+
+ testCases := []struct {
+ name string
+ gasPrice *big.Int
+ boostPercent int
+ mockSuggestGasTip *big.Int
+ mockSuggestGasErr error
+ mockHeader *types.Header
+ mockHeaderErr error
+ wantGasFeeCap *big.Int
+ wantGasTipCap *big.Int
+ wantErr error
+ }{
+ {
+ name: "with gas price",
+ gasPrice: big.NewInt(1000),
+ wantGasFeeCap: big.NewInt(1000),
+ wantGasTipCap: big.NewInt(1000),
+ },
+ {
+ name: "with gas price and base fee",
+ gasPrice: big.NewInt(1000),
+ mockHeader: &types.Header{BaseFee: baseFee},
+ wantGasFeeCap: big.NewInt(1000),
+ wantGasTipCap: big.NewInt(900),
+ },
+ {
+ name: "suggest tip error",
+ mockSuggestGasErr: errors.New("suggest tip error"),
+ wantErr: errors.New("failed to suggest gas tip cap: suggest tip error"),
+ },
+ {
+ name: "header error",
+ mockSuggestGasTip: big.NewInt(20),
+ mockHeaderErr: errors.New("header error"),
+ wantErr: errors.New("failed to get latest block header: header error"),
+ },
+ {
+ name: "no base fee",
+ mockSuggestGasTip: big.NewInt(20),
+ mockHeader: &types.Header{},
+ wantErr: wrapped.ErrEIP1559NotSupported,
+ },
+ {
+ name: "suggested tip > minimum",
+ mockSuggestGasTip: big.NewInt(20),
+ mockHeader: &types.Header{BaseFee: baseFee},
+ wantGasFeeCap: big.NewInt(220), // 2*100 + 20
+ wantGasTipCap: big.NewInt(20),
+ },
+ {
+ name: "suggested tip < minimum",
+ mockSuggestGasTip: big.NewInt(5),
+ mockHeader: &types.Header{BaseFee: baseFee},
+ wantGasFeeCap: big.NewInt(210), // 2*100 + 10
+ wantGasTipCap: big.NewInt(10),
+ },
+ {
+ name: "with boost",
+ boostPercent: 10,
+ mockSuggestGasTip: big.NewInt(20),
+ mockHeader: &types.Header{BaseFee: baseFee},
+ wantGasFeeCap: big.NewInt(222), // 2*100 + 22
+ wantGasTipCap: big.NewInt(22), // 20 * 1.1
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ backend := wrapped.NewBackend(
+ backendmock.New(
+ backendmock.WithSuggestGasTipCapFunc(func(ctx context.Context) (*big.Int, error) {
+ return tc.mockSuggestGasTip, tc.mockSuggestGasErr
+ }),
+ backendmock.WithHeaderbyNumberFunc(func(ctx context.Context, number *big.Int) (*types.Header, error) {
+ return tc.mockHeader, tc.mockHeaderErr
+ }),
+ ),
+ minimumGasTipCap,
+ )
+
+ gasFeeCap, gasTipCap, err := backend.SuggestedFeeAndTip(ctx, tc.gasPrice, tc.boostPercent)
+
+ if tc.wantErr != nil {
+ if err == nil {
+ t.Fatal("expected error but got none")
+ }
+ if err.Error() != tc.wantErr.Error() {
+ t.Fatalf("unexpected error. want %v, got %v", tc.wantErr, err)
+ }
+ return
+ }
+
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ if diff := cmp.Diff(tc.wantGasFeeCap.String(), gasFeeCap.String()); diff != "" {
+ t.Errorf("gasFeeCap mismatch (-want +got):\n%s", diff)
+ }
+
+ if diff := cmp.Diff(tc.wantGasTipCap.String(), gasTipCap.String()); diff != "" {
+ t.Errorf("gasTipCap mismatch (-want +got):\n%s", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/transaction/wrapped/metrics.go b/pkg/transaction/wrapped/metrics.go
index cb7ce3967c3..a5cfaa554a0 100644
--- a/pkg/transaction/wrapped/metrics.go
+++ b/pkg/transaction/wrapped/metrics.go
@@ -18,16 +18,14 @@ type metrics struct {
BlockNumberCalls prometheus.Counter
BlockHeaderCalls prometheus.Counter
BalanceCalls prometheus.Counter
- CodeAtCalls prometheus.Counter
NonceAtCalls prometheus.Counter
PendingNonceCalls prometheus.Counter
CallContractCalls prometheus.Counter
- SuggestGasPriceCalls prometheus.Counter
+ SuggestGasTipCapCalls prometheus.Counter
EstimateGasCalls prometheus.Counter
SendTransactionCalls prometheus.Counter
FilterLogsCalls prometheus.Counter
ChainIDCalls prometheus.Counter
- BlockByNumberCalls prometheus.Counter
}
func newMetrics() metrics {
@@ -76,12 +74,6 @@ func newMetrics() metrics {
Name: "calls_balance",
Help: "Count of eth_getBalance rpc calls",
}),
- CodeAtCalls: prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: m.Namespace,
- Subsystem: subsystem,
- Name: "calls_code_at",
- Help: "Count of eth_getCode rpc calls",
- }),
NonceAtCalls: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
@@ -100,11 +92,11 @@ func newMetrics() metrics {
Name: "calls_eth_call",
Help: "Count of eth_call rpc calls",
}),
- SuggestGasPriceCalls: prometheus.NewCounter(prometheus.CounterOpts{
+ SuggestGasTipCapCalls: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
- Name: "calls_suggest_gasprice",
- Help: "Count of eth_suggestGasPrice rpc calls",
+ Name: "calls_suggest_gas_tip_cap",
+ Help: "Count of eth_maxPriorityFeePerGas rpc calls",
}),
EstimateGasCalls: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
@@ -130,12 +122,6 @@ func newMetrics() metrics {
Name: "calls_chain_id",
Help: "Count of eth_chainId rpc calls",
}),
- BlockByNumberCalls: prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: m.Namespace,
- Subsystem: subsystem,
- Name: "calls_block_by_number",
- Help: "Count of eth_getBlockByNumber rpc calls",
- }),
}
}
diff --git a/pkg/transaction/wrapped/wrapped.go b/pkg/transaction/wrapped/wrapped.go
index 87ec74da7e9..1d4e452aef3 100644
--- a/pkg/transaction/wrapped/wrapped.go
+++ b/pkg/transaction/wrapped/wrapped.go
@@ -12,21 +12,25 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethersphere/bee/v2/pkg/transaction"
+ "github.com/ethersphere/bee/v2/pkg/transaction/backend"
)
-var _ transaction.Backend = (*wrappedBackend)(nil)
+var (
+ _ transaction.Backend = (*wrappedBackend)(nil)
+)
type wrappedBackend struct {
- backend *ethclient.Client
- metrics metrics
+ backend backend.Geth
+ metrics metrics
+ minimumGasTipCap int64
}
-func NewBackend(backend *ethclient.Client) transaction.Backend {
+func NewBackend(backend backend.Geth, minimumGasTipCap uint64) transaction.Backend {
return &wrappedBackend{
- backend: backend,
- metrics: newMetrics(),
+ backend: backend,
+ minimumGasTipCap: int64(minimumGasTipCap),
+ metrics: newMetrics(),
}
}
@@ -102,17 +106,6 @@ func (b *wrappedBackend) NonceAt(ctx context.Context, account common.Address, bl
return nonce, nil
}
-func (b *wrappedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
- b.metrics.TotalRPCCalls.Inc()
- b.metrics.CodeAtCalls.Inc()
- code, err := b.backend.CodeAt(ctx, contract, blockNumber)
- if err != nil {
- b.metrics.TotalRPCErrors.Inc()
- return nil, err
- }
- return code, nil
-}
-
func (b *wrappedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
b.metrics.TotalRPCCalls.Inc()
b.metrics.CallContractCalls.Inc()
@@ -135,20 +128,9 @@ func (b *wrappedBackend) PendingNonceAt(ctx context.Context, account common.Addr
return nonce, nil
}
-func (b *wrappedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
- b.metrics.TotalRPCCalls.Inc()
- b.metrics.SuggestGasPriceCalls.Inc()
- gasPrice, err := b.backend.SuggestGasPrice(ctx)
- if err != nil {
- b.metrics.TotalRPCErrors.Inc()
- return nil, err
- }
- return gasPrice, nil
-}
-
func (b *wrappedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
b.metrics.TotalRPCCalls.Inc()
- b.metrics.SuggestGasPriceCalls.Inc()
+ b.metrics.SuggestGasTipCapCalls.Inc()
gasTipCap, err := b.backend.SuggestGasTipCap(ctx)
if err != nil {
b.metrics.TotalRPCErrors.Inc()
@@ -156,11 +138,10 @@ func (b *wrappedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error)
}
return gasTipCap, nil
}
-
-func (b *wrappedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
+func (b *wrappedBackend) EstimateGasAtBlock(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (uint64, error) {
b.metrics.TotalRPCCalls.Inc()
b.metrics.EstimateGasCalls.Inc()
- gas, err = b.backend.EstimateGas(ctx, call)
+ gas, err := b.backend.EstimateGasAtBlock(ctx, msg, blockNumber)
if err != nil {
b.metrics.TotalRPCErrors.Inc()
return 0, err
@@ -201,21 +182,6 @@ func (b *wrappedBackend) ChainID(ctx context.Context) (*big.Int, error) {
return chainID, nil
}
-// BlockByNumber implements transaction.Backend.
-func (b *wrappedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
- b.metrics.TotalRPCCalls.Inc()
- b.metrics.BlockByNumberCalls.Inc()
- block, err := b.backend.BlockByNumber(ctx, number)
- if err != nil {
- if !errors.Is(err, ethereum.NotFound) {
- b.metrics.TotalRPCErrors.Inc()
- }
- return nil, err
- }
- return block, nil
-}
-
-func (b *wrappedBackend) Close() error {
+func (b *wrappedBackend) Close() {
b.backend.Close()
- return nil
}
diff --git a/pkg/util/syncutil/syncutil_test.go b/pkg/util/syncutil/syncutil_test.go
index ce05cec2dab..c73ed4cd980 100644
--- a/pkg/util/syncutil/syncutil_test.go
+++ b/pkg/util/syncutil/syncutil_test.go
@@ -7,23 +7,26 @@ package syncutil
import (
"sync"
"testing"
+ "testing/synctest"
"time"
)
func TestWaitWithTimeout(t *testing.T) {
var wg sync.WaitGroup
- if !WaitWithTimeout(&wg, 10*time.Millisecond) {
- t.Fatal("want timeout; have none")
- }
+ synctest.Test(t, func(t *testing.T) {
+ if !WaitWithTimeout(&wg, 10*time.Millisecond) {
+ t.Fatal("want timeout; have none")
+ }
- wg.Add(1)
- if WaitWithTimeout(&wg, 10*time.Millisecond) {
- t.Fatal("have timeout; want none")
- }
+ wg.Add(1)
+ if WaitWithTimeout(&wg, 10*time.Millisecond) {
+ t.Fatal("have timeout; want none")
+ }
- wg.Done()
- if !WaitWithTimeout(&wg, 10*time.Millisecond) {
- t.Fatal("want no timeout; have none")
- }
+ wg.Done()
+ if !WaitWithTimeout(&wg, 10*time.Millisecond) {
+ t.Fatal("want no timeout; have none")
+ }
+ })
}
diff --git a/pkg/util/testutil/pseudorand/reader_test.go b/pkg/util/testutil/pseudorand/reader_test.go
index 2abd6aaf382..90f5abe11ae 100644
--- a/pkg/util/testutil/pseudorand/reader_test.go
+++ b/pkg/util/testutil/pseudorand/reader_test.go
@@ -97,7 +97,7 @@ func TestReader(t *testing.T) {
}
})
t.Run("seek and match", func(t *testing.T) {
- for i := 0; i < 20; i++ {
+ for range 20 {
off := rand.Intn(size)
n := rand.Intn(size - off)
t.Run(fmt.Sprintf("off=%d n=%d", off, n), func(t *testing.T) {
diff --git a/pkg/util/testutil/racedetection/off.go b/pkg/util/testutil/racedetection/off.go
index d57125bfd03..cd7bfe36c55 100644
--- a/pkg/util/testutil/racedetection/off.go
+++ b/pkg/util/testutil/racedetection/off.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !race
-// +build !race
package racedetection
diff --git a/pkg/util/testutil/racedetection/on.go b/pkg/util/testutil/racedetection/on.go
index 92438ecfdc3..428ebb16800 100644
--- a/pkg/util/testutil/racedetection/on.go
+++ b/pkg/util/testutil/racedetection/on.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build race
-// +build race
package racedetection